/*
 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
 *
 * Module name: iSeries_setup.c
 *
 * Architecture- / platform-specific boot-time initialization code for
 * the IBM iSeries LPAR.  Adapted from original code by Grant Erickson and
 * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/root_dev.h>

#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "iSeries_setup.h"

#include <asm/cache.h>
#include <asm/abs_addr.h>
#include <asm/iSeries/HvCallHpt.h>
#include <asm/iSeries/HvLpConfig.h>
#include <asm/iSeries/HvCallEvent.h>
#include <asm/iSeries/HvCallSm.h>
#include <asm/iSeries/HvCallXm.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/IoHriMainStore.h>
#include <asm/iSeries/mf.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/iSeries_irq.h>
#include <asm/iSeries/IoHriProcessorVpd.h>
#include <asm/iSeries/ItVpdAreas.h>
#include <asm/iSeries/LparMap.h>
extern void hvlog(char *fmt, ...);

#define DBG(fmt...) hvlog(fmt)
/* Function Prototypes */
extern void ppcdbg_initialize(void);

static void build_iSeries_Memory_Map(void);
static void setup_iSeries_cache_sizes(void);
static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
static int iseries_shared_idle(void);
static int iseries_dedicated_idle(void);
#ifdef CONFIG_PCI
extern void iSeries_pci_final_fixup(void);
#else
static void iSeries_pci_final_fixup(void) { }
#endif
/* Global Variables */
static unsigned long procFreqHz;
static unsigned long procFreqMhz;
static unsigned long procFreqMhzHundreths;

static unsigned long tbFreqHz;
static unsigned long tbFreqMhz;
static unsigned long tbFreqMhzHundreths;

int piranha_simulator;

extern int rd_size;		/* Defined in drivers/block/rd.c */
extern unsigned long klimit;
extern unsigned long embedded_sysmap_start;
extern unsigned long embedded_sysmap_end;

extern unsigned long iSeries_recal_tb;
extern unsigned long iSeries_recal_titan;

static int mf_initialized;
struct MemoryBlock {
	unsigned long absStart;
	unsigned long absEnd;
	unsigned long logicalStart;
	unsigned long logicalEnd;
};
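
/*
 * Each MemoryBlock describes one contiguous range of the partition's
 * memory: absStart/absEnd are hypervisor absolute addresses (later
 * converted to 256K chunk numbers), while logicalStart/logicalEnd are
 * the corresponding positions in the hypervisor access-map ("bitmap")
 * space that Linux will treat as contiguous physical memory.
 */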
/*
 * Process the main store VPD to determine where the holes in memory are,
 * return the number of physical blocks found, and fill in the array of
 * memory block data.
 */
static unsigned long iSeries_process_Condor_mainstore_vpd(
		struct MemoryBlock *mb_array, unsigned long max_entries)
{
	unsigned long holeFirstChunk, holeSizeChunks;
	unsigned long numMemoryBlocks = 1;
	struct IoHriMainStoreSegment4 *msVpd =
		(struct IoHriMainStoreSegment4 *)xMsVpd;
	unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
	unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
	unsigned long holeSize = holeEnd - holeStart;

	printk("Mainstore_VPD: Condor\n");
	/*
	 * Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 */
	mb_array[0].logicalStart = 0;
	mb_array[0].logicalEnd = 0x100000000;
	mb_array[0].absStart = 0;
	mb_array[0].absEnd = 0x100000000;

	if (holeSize) {
		numMemoryBlocks = 2;
		holeStart = holeStart & 0x000fffffffffffff;
		holeStart = addr_to_chunk(holeStart);
		holeFirstChunk = holeStart;
		holeSize = addr_to_chunk(holeSize);
		holeSizeChunks = holeSize;
		printk("Main store hole: start chunk = %0lx, size = %0lx chunks\n",
				holeFirstChunk, holeSizeChunks);
		mb_array[0].logicalEnd = holeFirstChunk;
		mb_array[0].absEnd = holeFirstChunk;
		mb_array[1].logicalStart = holeFirstChunk;
		mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
		mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
		mb_array[1].absEnd = 0x100000000;
	}
	return numMemoryBlocks;
}
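
/*
 * Illustrative example (hypothetical values): if the non-interleaved
 * hole starts at chunk 0x300 and is 0x100 chunks long, block 0 covers
 * logical/absolute chunks [0, 0x300) and block 1 covers logical chunks
 * [0x300, 0xffffff00) mapped to absolute chunks [0x400, 0x100000000).
 */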
#define MaxSegmentAreas			32
#define MaxSegmentAdrRangeBlocks	128
#define MaxAreaRangeBlocks		4
static unsigned long iSeries_process_Regatta_mainstore_vpd(
		struct MemoryBlock *mb_array, unsigned long max_entries)
{
	struct IoHriMainStoreSegment5 *msVpdP =
		(struct IoHriMainStoreSegment5 *)xMsVpd;
	unsigned long numSegmentBlocks = 0;
	u32 existsBits = msVpdP->msAreaExists;
	unsigned long area_num;

	printk("Mainstore_VPD: Regatta\n");

	for (area_num = 0; area_num < MaxSegmentAreas; ++area_num) {
		unsigned long numAreaBlocks;
		struct IoHriMainStoreArea4 *currentArea;

		if (existsBits & 0x80000000) {
			unsigned long block_num;

			currentArea = &msVpdP->msAreaArray[area_num];
			numAreaBlocks = currentArea->numAdrRangeBlocks;
			printk("ms_vpd: processing area %2ld blocks=%ld",
					area_num, numAreaBlocks);
			for (block_num = 0; block_num < numAreaBlocks;
					++block_num) {
				/* Process an address range block */
				struct MemoryBlock tempBlock;
				unsigned long i;

				tempBlock.absStart =
					(unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
				tempBlock.absEnd =
					(unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
				tempBlock.logicalStart = 0;
				tempBlock.logicalEnd = 0;
				printk("\n          block %ld absStart=%016lx absEnd=%016lx",
						block_num, tempBlock.absStart,
						tempBlock.absEnd);

				for (i = 0; i < numSegmentBlocks; ++i) {
					if (mb_array[i].absStart ==
							tempBlock.absStart)
						break;
				}
				if (i == numSegmentBlocks) {
					if (numSegmentBlocks == max_entries)
						panic("iSeries_process_mainstore_vpd: too many memory blocks");
					mb_array[numSegmentBlocks] = tempBlock;
					++numSegmentBlocks;
				} else
					printk(" (duplicate)");
			}
			printk("\n");
		}
		existsBits <<= 1;
	}
	/* Now sort the blocks found into ascending sequence */
	if (numSegmentBlocks > 1) {
		unsigned long m, n;

		for (m = 0; m < numSegmentBlocks - 1; ++m) {
			for (n = numSegmentBlocks - 1; m < n; --n) {
				if (mb_array[n].absStart <
						mb_array[n-1].absStart) {
					struct MemoryBlock tempBlock;

					tempBlock = mb_array[n];
					mb_array[n] = mb_array[n-1];
					mb_array[n-1] = tempBlock;
				}
			}
		}
	}
	/*
	 * Assign "logical" addresses to each block.  These
	 * addresses correspond to the hypervisor "bitmap" space.
	 * Convert all addresses into units of 256K chunks.
	 */
	{
		unsigned long i, nextBitmapAddress;

		printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
		nextBitmapAddress = 0;
		for (i = 0; i < numSegmentBlocks; ++i) {
			unsigned long length = mb_array[i].absEnd -
				mb_array[i].absStart;

			mb_array[i].logicalStart = nextBitmapAddress;
			mb_array[i].logicalEnd = nextBitmapAddress + length;
			nextBitmapAddress += length;
			printk("          Bitmap range: %016lx - %016lx\n"
					"        Absolute range: %016lx - %016lx\n",
					mb_array[i].logicalStart,
					mb_array[i].logicalEnd,
					mb_array[i].absStart, mb_array[i].absEnd);
			mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
					0x000fffffffffffff);
			mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
					0x000fffffffffffff);
			mb_array[i].logicalStart =
				addr_to_chunk(mb_array[i].logicalStart);
			mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
		}
	}

	return numSegmentBlocks;
}
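
/*
 * Note on the routine above: the sort is a simple bubble sort over at
 * most 32 blocks, and the "logical" addresses assigned afterwards pack
 * the sorted blocks back-to-back starting at zero, which is what lets
 * Linux see one contiguous physical address space.
 */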
static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
		unsigned long max_entries)
{
	unsigned long i;
	unsigned long mem_blocks = 0;

	if (cpu_has_feature(CPU_FTR_SLB))
		mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
				max_entries);
	else
		mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
				max_entries);

	printk("Mainstore_VPD: numMemoryBlocks = %ld\n", mem_blocks);
	for (i = 0; i < mem_blocks; ++i) {
		printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
				"                             abs chunks %016lx - %016lx\n",
				i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
				mb_array[i].absStart, mb_array[i].absEnd);
	}
	return mem_blocks;
}
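
/*
 * CPU_FTR_SLB is used here to pick the VPD format: the assumption is
 * that SLB-capable (POWER4/Regatta-class) processors report their main
 * store layout in the Segment5 format, while older Condor-class boxes
 * use the Segment4 format handled above.
 */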
static void __init iSeries_get_cmdline(void)
{
	char *p, *q;

	/* copy the command line parameter from the primary VSP */
	HvCallEvent_dmaToSp(cmd_line, 2 * 64 * 1024, 256,
			HvLpDma_Direction_RemoteToLocal);

	p = cmd_line;
	q = cmd_line + 255;
	while (p < q) {
		if (!*p || *p == '\n')
			break;
		++p;
	}
	*p = 0;
}
static void __init iSeries_init_early(void)
{
	extern unsigned long memory_limit;

	DBG(" -> iSeries_init_early()\n");

	ppc64_firmware_features = FW_FEATURE_ISERIES;

	ppcdbg_initialize();

#if defined(CONFIG_BLK_DEV_INITRD)
	/*
	 * If the init RAM disk has been configured and there is
	 * a non-zero starting address for it, set it up
	 */
	if (naca.xRamDisk) {
		initrd_start = (unsigned long)__va(naca.xRamDisk);
		initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE;
		initrd_below_start_ok = 1;	// ramdisk in kernel space
		ROOT_DEV = Root_RAM0;
		if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize)
			rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024;
	} else
#endif /* CONFIG_BLK_DEV_INITRD */
	{
		/* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
	}

	iSeries_recal_tb = get_tb();
	iSeries_recal_titan = HvCallXm_loadTod();

	/*
	 * Cache sizes must be initialized before hpte_init_iSeries is called
	 * as the latter needs them for flush_icache_range()
	 */
	setup_iSeries_cache_sizes();

	/*
	 * Initialize the hash table management pointers
	 */
	hpte_init_iSeries();

	/*
	 * Initialize the DMA/TCE management
	 */
	iommu_init_early_iSeries();

	/*
	 * Initialize the table which translates Linux physical addresses to
	 * AS/400 absolute addresses
	 */
	build_iSeries_Memory_Map();

	iSeries_get_cmdline();

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);

	/* Parse early parameters, in particular mem=x */
	parse_early_param();

	if (memory_limit) {
		if (memory_limit < systemcfg->physicalMemorySize)
			systemcfg->physicalMemorySize = memory_limit;
		else {
			printk("Ignoring mem=%lu >= ram_top.\n", memory_limit);
			memory_limit = 0;
		}
	}

	/* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
	iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);

	lmb_init();
	lmb_add(0, systemcfg->physicalMemorySize);
	lmb_analyze();
	lmb_reserve(0, __pa(klimit));

	/* Initialize machine-dependency vectors */
#ifdef CONFIG_SMP
	smp_init_iSeries();
#endif
	if (itLpNaca.xPirEnvironMode == 0)
		piranha_simulator = 1;

	/* Associate Lp Event Queue 0 with processor 0 */
	HvCallEvent_setLpEventQueueInterruptProc(0, 0);

	mf_init();
	mf_initialized = 1;
	mb();

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
			initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;
#endif /* CONFIG_BLK_DEV_INITRD */

	DBG(" <- iSeries_init_early()\n");
}
struct mschunks_map mschunks_map = {
	/* XXX We don't use these, but Piranha might need them. */
	.chunk_size  = MSCHUNKS_CHUNK_SIZE,
	.chunk_shift = MSCHUNKS_CHUNK_SHIFT,
	.chunk_mask  = MSCHUNKS_OFFSET_MASK,
};
EXPORT_SYMBOL(mschunks_map);
void mschunks_alloc(unsigned long num_chunks)
{
	klimit = _ALIGN(klimit, sizeof(u32));
	mschunks_map.mapping = (u32 *)klimit;
	klimit += num_chunks * sizeof(u32);
	mschunks_map.num_chunks = num_chunks;
}
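
/*
 * The map is indexed by Linux "physical" chunk number and yields the
 * absolute chunk number, so a physical address pa translates roughly as
 * chunk_to_addr(mschunks_map.mapping[addr_to_chunk(pa)]) plus pa's
 * offset within its 256K chunk; phys_to_abs(), used below when bolting
 * kernel pages, performs this lookup.
 */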
/*
 * The iSeries may have very large memories ( > 128 GB ) and a partition
 * may get memory in "chunks" that may be anywhere in the 2**52 real
 * address space.  The chunks are 256K in size.  To map this to the
 * memory model Linux expects, the AS/400 specific code builds a
 * translation table to translate what Linux thinks are "physical"
 * addresses to the actual real addresses.  This allows us to make
 * it appear to Linux that we have contiguous memory starting at
 * physical address zero while in fact this could be far from the truth.
 * To avoid confusion, I'll let the words physical and/or real address
 * apply to the Linux addresses while I'll use "absolute address" to
 * refer to the actual hardware real address.
 *
 * build_iSeries_Memory_Map gets information from the Hypervisor and
 * looks at the Main Store VPD to determine the absolute addresses
 * of the memory that has been assigned to our partition and builds
 * a table used to translate Linux's physical addresses to these
 * absolute addresses.  Absolute addresses are needed when
 * communicating with the hypervisor (e.g. to build HPT entries)
 */
static void __init build_iSeries_Memory_Map(void)
{
	u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
	u32 nextPhysChunk;
	u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
	u32 num_ptegs;
	u32 totalChunks, moreChunks;
	u32 currChunk, thisChunk, absChunk;
	u32 currDword;
	u32 chunkBit;
	u64 map;
	struct MemoryBlock mb[32];
	unsigned long numMemoryBlocks, curBlock;

	/* Chunk size on iSeries is 256K bytes */
	totalChunks = (u32)HvLpConfig_getMsChunks();
	mschunks_alloc(totalChunks);

	/*
	 * Get absolute address of our load area
	 * and map it to physical address 0
	 * This guarantees that the loadarea ends up at physical 0
	 * otherwise, it might not be returned by PLIC as the first
	 * contiguous chunk of memory.
	 */
	loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
	loadAreaSize = itLpNaca.xLoadAreaChunks;

	/*
	 * Only add the pages already mapped here.
	 * Otherwise we might add the hpt pages
	 * The rest of the pages of the load area
	 * aren't in the HPT yet and can still
	 * be assigned an arbitrary physical address
	 */
	if ((loadAreaSize * 64) > HvPagesToMap)
		loadAreaSize = HvPagesToMap / 64;

	loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;

	/*
	 * TODO Do we need to do something if the HPT is in the 64MB load area?
	 * This would be required if the itLpNaca.xLoadAreaChunks includes
	 * the HPT size.
	 */
	printk("Mapping load area - physical addr = 0000000000000000\n"
			"                    absolute addr = %016lx\n",
			chunk_to_addr(loadAreaFirstChunk));
	printk("Load area size %dK\n", loadAreaSize * 256);

	for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
		mschunks_map.mapping[nextPhysChunk] =
			loadAreaFirstChunk + nextPhysChunk;
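
	/*
	 * For example (illustrative numbers only): a 64MB load area is
	 * 64MB / 256K = 256 chunks, so entries 0..255 of the mapping would
	 * point at the load area's absolute chunks, placing the load area
	 * at Linux physical addresses 0 - 64MB.
	 */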

	/*
	 * Get absolute address of our HPT and remember it so
	 * we won't map it to any physical address
	 */
	hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
	hptSizePages = (u32)HvCallHpt_getHptPages();
	hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
	hptLastChunk = hptFirstChunk + hptSizeChunks - 1;

	printk("HPT absolute addr = %016lx, size = %dK\n",
			chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);

	/* Fill in the hashed page table hash mask */
	num_ptegs = hptSizePages *
		(PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
	htab_hash_mask = num_ptegs - 1;
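
	/*
	 * With 4K pages, each hpte_t is 16 bytes and HPTES_PER_GROUP is 8,
	 * so each HPT page holds 4096 / (16 * 8) = 32 PTEGs.  Assuming the
	 * usual power-of-two HPT size, num_ptegs is a power of two and
	 * (num_ptegs - 1) can be used directly as the hash mask.
	 */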

	/*
	 * The actual hashed page table is in the hypervisor,
	 * we have no direct access
	 */

	/*
	 * Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 */
	numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);

	/*
	 * Process the main store access map from the hypervisor
	 * to build up our physical -> absolute translation table
	 */
	curBlock = 0;
	currChunk = 0;
	currDword = 0;
	moreChunks = totalChunks;

	while (moreChunks) {
		map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
				currDword);
		thisChunk = currChunk;
		while (map) {
			chunkBit = map >> 63;
			map <<= 1;
			if (chunkBit) {
				--moreChunks;
				while (thisChunk >= mb[curBlock].logicalEnd) {
					++curBlock;
					if (curBlock >= numMemoryBlocks)
						panic("out of memory blocks");
				}
				if (thisChunk < mb[curBlock].logicalStart)
					panic("memory block error");

				absChunk = mb[curBlock].absStart +
					(thisChunk - mb[curBlock].logicalStart);
				if (((absChunk < hptFirstChunk) ||
				     (absChunk > hptLastChunk)) &&
				    ((absChunk < loadAreaFirstChunk) ||
				     (absChunk > loadAreaLastChunk))) {
					mschunks_map.mapping[nextPhysChunk] =
						absChunk;
					++nextPhysChunk;
				}
			}
			++thisChunk;
		}
		++currDword;
		currChunk += 64;
	}

	/*
	 * main store size (in chunks) is
	 *   totalChunks - hptSizeChunks
	 * which should be equal to
	 *   nextPhysChunk
	 */
	systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
}

/*
 * Set up the variables that describe the cache line sizes
 * for this machine.
 */
static void __init setup_iSeries_cache_sizes(void)
{
	unsigned int i, n;
	unsigned int procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;

	systemcfg->icache_size =
	ppc64_caches.isize = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
	systemcfg->icache_line_size =
	ppc64_caches.iline_size =
		xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
	systemcfg->dcache_size =
	ppc64_caches.dsize =
		xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
	systemcfg->dcache_line_size =
	ppc64_caches.dline_size =
		xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
	ppc64_caches.ilines_per_page = PAGE_SIZE / ppc64_caches.iline_size;
	ppc64_caches.dlines_per_page = PAGE_SIZE / ppc64_caches.dline_size;

	i = ppc64_caches.iline_size;
	n = 0;
	while ((i = (i / 2)))
		++n;
	ppc64_caches.log_iline_size = n;

	i = ppc64_caches.dline_size;
	n = 0;
	while ((i = (i / 2)))
		++n;
	ppc64_caches.log_dline_size = n;
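
	/*
	 * The loops above compute floor(log2(line size)): for example a
	 * 128-byte cache line halves to zero after 7 divisions, giving a
	 * log line size of 7.
	 */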
631 printk("D-cache line size = %d\n",
632 (unsigned int)ppc64_caches.dline_size);
633 printk("I-cache line size = %d\n",
634 (unsigned int)ppc64_caches.iline_size);
/*
 * Create a pte.  Used during initialization only.
 */
static void iSeries_make_pte(unsigned long va, unsigned long pa,
		int mode)
{
	hpte_t local_hpte, rhpte;
	unsigned long hash, vpn;
	long slot;

	vpn = va >> PAGE_SHIFT;
	hash = hpt_hash(vpn, 0);

	local_hpte.r = pa | mode;
	local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
		| HPTE_V_BOLTED | HPTE_V_VALID;

	slot = HvCallHpt_findValid(&rhpte, vpn);
	if (slot < 0) {
		/* Must find space in primary group */
		panic("hash_page: hpte already exists\n");
	}
	HvCallHpt_addValidate(slot, 0, &local_hpte);
}

/*
 * Bolt the kernel addr space into the HPT
 */
static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
{
	unsigned long pa;
	unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
	hpte_t hpte;

	for (pa = saddr; pa < eaddr; pa += PAGE_SIZE) {
		unsigned long ea = (unsigned long)__va(pa);
		unsigned long vsid = get_kernel_vsid(ea);
		unsigned long va = (vsid << 28) | (pa & 0xfffffff);
		unsigned long vpn = va >> PAGE_SHIFT;
		unsigned long slot = HvCallHpt_findValid(&hpte, vpn);

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(ea))
			mode_rw |= HW_NO_EXEC;

		if (hpte.v & HPTE_V_VALID) {
			/* HPTE exists, so just bolt it */
			HvCallHpt_setSwBits(slot, 0x10, 0);
			/* And make sure the pp bits are correct */
			HvCallHpt_setPp(slot, PP_RWXX);
		} else
			/* No HPTE exists, so create a new bolted one */
			iSeries_make_pte(va, phys_to_abs(pa), mode_rw);
	}
}

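/*
 * Note on iSeries_bolt_kernel() above: mode_rw is modified inside the
 * loop, so once a page outside kernel text is seen, HW_NO_EXEC stays set
 * for every later page.  That is harmless here because pages are walked
 * in ascending order and kernel text sits at the start of the range.
 */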
static void __init iSeries_setup_arch(void)
{
	unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;

	if (get_paca()->lppaca.shared_proc) {
		ppc_md.idle_loop = iseries_shared_idle;
		printk(KERN_INFO "Using shared processor idle loop\n");
	} else {
		ppc_md.idle_loop = iseries_dedicated_idle;
		printk(KERN_INFO "Using dedicated idle loop\n");
	}

	/* Add an eye catcher and the systemcfg layout version number */
	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
	systemcfg->version.major = SYSTEMCFG_MAJOR;
	systemcfg->version.minor = SYSTEMCFG_MINOR;

	/* Setup the Lp Event Queue */
	setup_hvlpevent_queue();

	/* Compute processor frequency */
	procFreqHz = ((1UL << 34) * 1000000) /
			xIoHriProcessorVpd[procIx].xProcFreq;
	procFreqMhz = procFreqHz / 1000000;
	procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
	ppc_proc_freq = procFreqHz;
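
	/*
	 * Worked example (hypothetical value): procFreqHz = 1500990000
	 * gives procFreqMhz = 1500 and procFreqMhzHundreths = 99, which is
	 * printed below as "1500.99".
	 */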

	/* Compute time base frequency */
	tbFreqHz = ((1UL << 32) * 1000000) /
			xIoHriProcessorVpd[procIx].xTimeBaseFreq;
	tbFreqMhz = tbFreqHz / 1000000;
	tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
	ppc_tb_freq = tbFreqHz;

	printk("Max logical processors = %d\n",
			itVpdAreas.xSlicMaxLogicalProcs);
	printk("Max physical processors = %d\n",
			itVpdAreas.xSlicMaxPhysicalProcs);
	printk("Processor frequency = %lu.%02lu\n", procFreqMhz,
			procFreqMhzHundreths);
	printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
			tbFreqMhzHundreths);
	systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
	printk("Processor version = %x\n", systemcfg->processor);
}

static void iSeries_get_cpuinfo(struct seq_file *m)
{
	seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
}

static int iSeries_get_irq(struct pt_regs *regs)
{
	/* -2 means ignore this interrupt */
	return -2;
}

static void iSeries_restart(char *cmd)
{
	mf_reboot();
}

static void iSeries_power_off(void)
{
	mf_power_off();
}

static void iSeries_halt(void)
{
	mf_power_off();
}

/*
 * void __init iSeries_calibrate_decr()
 *
 * This routine retrieves the internal processor frequency from the VPD,
 * and sets up the kernel timer decrementer based on that value.
 */
static void __init iSeries_calibrate_decr(void)
{
	unsigned long cyclesPerUsec;
	struct div_result divres;

	/* Compute decrementer (and TB) frequency in cycles/sec */
	cyclesPerUsec = ppc_tb_freq / 1000000;

	/*
	 * Set the amount to refresh the decrementer by.  This
	 * is the number of decrementer ticks it takes for
	 * 1/HZ seconds.
	 */
	tb_ticks_per_jiffy = ppc_tb_freq / HZ;

#if 0
	/* TEST CODE FOR ADJTIME */
	tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
	/* END OF TEST CODE */
#endif

	/*
	 * tb_ticks_per_sec = freq; would give better accuracy
	 * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
	 * that jiffies (and xtime) will match the time returned
	 * by do_gettimeofday.
	 */
	tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
	tb_ticks_per_usec = cyclesPerUsec;
	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
	div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
	tb_to_xs = divres.result_low;
	setup_default_decr();
}
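
/*
 * Illustrative numbers for iSeries_calibrate_decr() above (hypothetical):
 * with a 512 MHz time base and HZ = 250,
 *   tb_ticks_per_jiffy = 512000000 / 250 = 2048000
 *   tb_ticks_per_usec  = 512000000 / 1000000 = 512
 */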

static void __init iSeries_progress(char *st, unsigned short code)
{
	printk("Progress: [%04x] - %s\n", (unsigned)code, st);
	if (!piranha_simulator && mf_initialized) {
		if (code != 0xffff)
			mf_display_progress(code);
		else
			mf_clear_src();
	}
}

static void __init iSeries_fixup_klimit(void)
{
	/*
	 * Change klimit to take into account any ram disk
	 * that may be included
	 */
	if (naca.xRamDisk)
		klimit = KERNELBASE + (u64)naca.xRamDisk +
			(naca.xRamDiskSize * PAGE_SIZE);
	else {
		/*
		 * No ram disk was included - check and see if there
		 * was an embedded system map.  Change klimit to take
		 * into account any embedded system map
		 */
		if (embedded_sysmap_end)
			klimit = KERNELBASE + ((embedded_sysmap_end + 4095) &
					0xfffffffffffff000);
	}
}

static int __init iSeries_src_init(void)
{
	/* clear the progress line */
	ppc_md.progress(" ", 0xffff);
	return 0;
}

late_initcall(iSeries_src_init);
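
/*
 * process_iSeries_events() below issues a "system call" with r0 = 0x5555.
 * The assumption here is that this magic value is recognised by the
 * iSeries low-level exception path and used as a hook to run pending
 * hypervisor lp-event processing with the proper interrupt state.
 */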
static inline void process_iSeries_events(void)
{
	asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
}

static void yield_shared_processor(void)
{
	unsigned long tb;

	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
				    HvCall_MaskLpEvent |
				    HvCall_MaskLpProd |
				    HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb + tb_ticks_per_jiffy);

	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_paca()->lppaca.int_dword.fields.decr_int = 1;
	process_iSeries_events();
}

static int iseries_shared_idle(void)
{
	while (1) {
		while (!need_resched() && !hvlpevent_is_pending()) {
			local_irq_disable();
			ppc64_runlatch_off();

			/* Recheck with irqs off */
			if (!need_resched() && !hvlpevent_is_pending())
				yield_shared_processor();

			HMT_medium();
			local_irq_enable();
		}

		ppc64_runlatch_on();

		if (hvlpevent_is_pending())
			process_iSeries_events();

		schedule();
	}

	return 0;
}

static int iseries_dedicated_idle(void)
{
	long oldval;

	while (1) {
		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);
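			/*
			 * TIF_POLLING_NRFLAG tells remote CPUs that this
			 * idle thread polls need_resched itself, so they
			 * can skip the resched IPI when waking it.
			 */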

			while (!need_resched()) {
				ppc64_runlatch_off();
				HMT_low();

				if (hvlpevent_is_pending()) {
					HMT_medium();
					ppc64_runlatch_on();
					process_iSeries_events();
				}
			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		ppc64_runlatch_on();
		schedule();
	}

	return 0;
}

#ifndef CONFIG_PCI
void __init iSeries_init_IRQ(void) { }
#endif

struct machdep_calls __initdata iseries_md = {
	.setup_arch	= iSeries_setup_arch,
	.get_cpuinfo	= iSeries_get_cpuinfo,
	.init_IRQ	= iSeries_init_IRQ,
	.get_irq	= iSeries_get_irq,
	.init_early	= iSeries_init_early,
	.pcibios_fixup	= iSeries_pci_final_fixup,
	.restart	= iSeries_restart,
	.power_off	= iSeries_power_off,
	.halt		= iSeries_halt,
	.get_boot_time	= iSeries_get_boot_time,
	.set_rtc_time	= iSeries_set_rtc_time,
	.get_rtc_time	= iSeries_get_rtc_time,
	.calibrate_decr	= iSeries_calibrate_decr,
	.progress	= iSeries_progress,
	/* XXX Implement enable_pmcs for iSeries */
};

void __init iSeries_early_setup(void)
{
	iSeries_fixup_klimit();