patch-1.3.4 linux/arch/sparc/mm/init.c
- Lines: 408
- Date: Sun Jun 11 05:46:13 1995
- Orig file: v1.3.3/linux/arch/sparc/mm/init.c
- Orig date: Wed Mar 1 09:12:33 1995
diff -u --recursive --new-file v1.3.3/linux/arch/sparc/mm/init.c linux/arch/sparc/mm/init.c
@@ -21,16 +21,29 @@
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/vaddrs.h>
extern void scsi_mem_init(unsigned long);
extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
-extern int map_the_prom(int);
+struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+
+/* The following number keeps track of which page table is to
+ * next be allocated in a page. This is necessary since there
+ * are 16 page tables per page on the space.
+ */
+unsigned long ptr_in_current_pgd;
+
+/* This keeps track of which physical segments are in use right now. */
+unsigned int phys_seg_map[PSEG_ENTRIES];
+unsigned int phys_seg_life[PSEG_ENTRIES];
+
+/* Context allocation. */
+struct task_struct *ctx_tasks[MAX_CTXS];
+int ctx_tasks_last_frd;
-struct sparc_phys_banks sp_banks[14];
-unsigned long *sun4c_mmu_table;
extern int invalid_segment, num_segmaps, num_contexts;
/*
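The "16 page tables per page" remark above glosses over the arithmetic: a sun4c page table holds 64 PTEs (the old code's `curpte > 63` check later in this patch reflects that) of 4 bytes each, i.e. 256 bytes, so sixteen tables fit in one 4096-byte page and a cursor like `ptr_in_current_pgd` is needed to hand out sub-page slots. A minimal sketch of such an allocator, using hypothetical names (`alloc_pte_table`, `table_page`) rather than the code this patch actually installs:

    /* Illustrative only: carve 256-byte sun4c page tables out of a
     * 4K page, advancing a cursor in the role of ptr_in_current_pgd. */
    #define SUN4C_PTES_PER_TABLE   64                        /* PTEs per segment */
    #define SUN4C_TABLE_BYTES      (SUN4C_PTES_PER_TABLE * 4)   /* 256 bytes */
    #define SUN4C_TABLES_PER_PAGE  (4096 / SUN4C_TABLE_BYTES)   /* 16 tables */

    static unsigned long table_page;    /* page currently being carved up */
    static unsigned int  table_cursor;  /* next free 256-byte slot        */

    static unsigned long alloc_pte_table(unsigned long fresh_page)
    {
            unsigned long table;

            if (table_cursor == 0)
                    table_page = fresh_page;    /* start on a new page */
            table = table_page + table_cursor * SUN4C_TABLE_BYTES;
            table_cursor = (table_cursor + 1) % SUN4C_TABLES_PER_PAGE;
            return table;
    }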
@@ -61,7 +74,7 @@
unsigned long __zero_page(void)
{
memset((void *) ZERO_PGE, 0, PAGE_SIZE);
- return ZERO_PGE;
+ return (unsigned long) ZERO_PGE;
}
void show_mem(void)
@@ -93,252 +106,140 @@
}
extern unsigned long free_area_init(unsigned long, unsigned long);
+extern pgprot_t protection_map[16];
/*
- * paging_init() sets up the page tables: in the alpha version this actually
- * unmaps the bootup page table (as we're now in KSEG, so we don't need it).
+ * paging_init() sets up the page tables: We call the MMU specific
+ * init routine based upon the Sun model type on the Sparc.
*
- * The bootup sequence put the virtual page table into high memory: that
- * means that we can change the L1 page table by just using VL1p below.
*/
+extern unsigned long sun4c_paging_init(unsigned long, unsigned long);
+extern unsigned long srmmu_paging_init(unsigned long, unsigned long);
+extern unsigned long probe_devices(unsigned long);
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
- unsigned long i, a, b, mask=0;
- unsigned long curseg, curpte, num_inval;
- unsigned long address;
- pte_t *pg_table;
-
- register int num_segs, num_ctx;
- register char * c;
-
- num_segs = num_segmaps;
- num_ctx = num_contexts;
-
- num_segs -= 1;
- invalid_segment = num_segs;
-
- start_mem = free_area_init(start_mem, end_mem);
-
-/* On the sparc we first need to allocate the segmaps for the
- * PROM's virtual space, and make those segmaps unusable. We
- * map the PROM in ALL contexts therefore the break key and the
- * sync command work no matter what state you took the machine
- * out of
- */
-
- printk("mapping the prom...\n");
- num_segs = map_the_prom(num_segs);
-
- start_mem = PAGE_ALIGN(start_mem);
+ int i;
- /* Set up static page tables in kernel space, this will be used
- * so that the low-level page fault handler can fill in missing
- * TLB entries since all mmu entries cannot be loaded at once
- * on the sun4c.
- */
-
-#if 0
- /* ugly debugging code */
- for(i=0; i<40960; i+=PAGE_SIZE)
- printk("address=0x%x vseg=%d pte=0x%x\n", (unsigned int) i,
- (int) get_segmap(i), (unsigned int) get_pte(i));
-#endif
+ switch(sparc_cpu_model) {
+ case sun4c:
+ start_mem = sun4c_paging_init(start_mem, end_mem);
+ break;
+ case sun4m:
+ case sun4d:
+ case sun4e:
+ start_mem = srmmu_paging_init(start_mem, end_mem);
+ break;
+ default:
+ printk("paging_init: Cannot init paging on this Sparc\n");
+ printk("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
+ printk("paging_init: Halting...\n");
+ halt();
+ };
+
+ /* Initialize context map. */
+ for(i=0; i<MAX_CTXS; i++) ctx_tasks[i] = NULL;
+
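The `ctx_tasks[]` array cleared above, together with `ctx_tasks_last_frd`, suggests a simple MMU-context allocator: hand out a free hardware context if one exists, otherwise steal one round-robin. The allocator itself is not part of this hunk, so the following is only a plausible sketch; `alloc_context()` and its eviction policy are assumptions built on the declarations this patch adds:

    /* Hypothetical sketch: allocate a hardware MMU context for a task,
     * stealing in round-robin order (tracked by ctx_tasks_last_frd)
     * once all MAX_CTXS contexts are taken. */
    static int alloc_context(struct task_struct *tsk)
    {
            int i;

            for (i = 0; i < MAX_CTXS; i++)
                    if (ctx_tasks[i] == NULL) {
                            ctx_tasks[i] = tsk;    /* free slot found */
                            return i;
                    }
            /* All contexts in use: evict the next victim. */
            ctx_tasks_last_frd = (ctx_tasks_last_frd + 1) % MAX_CTXS;
            ctx_tasks[ctx_tasks_last_frd] = tsk;
            return ctx_tasks_last_frd;
    }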
+ /* Initialize the protection map */
+ protection_map[0] = __P000;
+ protection_map[1] = __P001;
+ protection_map[2] = __P010;
+ protection_map[3] = __P011;
+ protection_map[4] = __P100;
+ protection_map[5] = __P101;
+ protection_map[6] = __P110;
+ protection_map[7] = __P111;
+ protection_map[8] = __S000;
+ protection_map[9] = __S001;
+ protection_map[10] = __S010;
+ protection_map[11] = __S011;
+ protection_map[12] = __S100;
+ protection_map[13] = __S101;
+ protection_map[14] = __S110;
+ protection_map[15] = __S111;
- printk("Setting up kernel static mmu table... bounce bounce\n");
+ start_mem = probe_devices(start_mem);
- address = 0; /* ((unsigned long) &end) + 524288; */
- sun4c_mmu_table = (unsigned long *) start_mem;
- pg_table = (pte_t *) start_mem;
- curseg = curpte = num_inval = 0;
- while(address < end_mem) {
- if(curpte == 0)
- put_segmap((address&PGDIR_MASK), curseg);
- for(i=0; sp_banks[i].num_bytes != 0; i++)
- if((address >= sp_banks[i].base_addr) &&
- (address <= (sp_banks[i].base_addr + sp_banks[i].num_bytes)))
- goto good_address;
- /* No physical memory here, so set the virtual segment to
- * the invalid one, and put an invalid pte in the static
- * kernel table.
- */
- *pg_table = mk_pte((address >> PAGE_SHIFT), PAGE_INVALID);
- pg_table++; curpte++; num_inval++;
- if(curpte > 63) {
- if(curpte == num_inval) {
- put_segmap((address&PGDIR_MASK), invalid_segment);
- } else {
- put_segmap((address&PGDIR_MASK), curseg);
- curseg++;
- }
- curpte = num_inval = 0;
- }
- address += PAGE_SIZE;
- continue;
-
- good_address:
- /* create pte entry */
- if(address < (((unsigned long) &end) + 524288)) {
- pte_val(*pg_table) = get_pte(address);
- } else {
- *pg_table = mk_pte((address >> PAGE_SHIFT), PAGE_KERNEL);
- put_pte(address, pte_val(*pg_table));
- }
-
- pg_table++; curpte++;
- if(curpte > 63) {
- put_segmap((address&PGDIR_MASK), curseg);
- curpte = num_inval = 0;
- curseg++;
- }
- address += PAGE_SIZE;
- }
-
- start_mem = (unsigned long) pg_table;
- /* ok, allocate the kernel pages, map them in all contexts
- * (with help from the prom), and lock them. Isn't the sparc
- * fun kiddies? TODO
- */
-
-#if 0
- /* ugly debugging code */
- for(i=0x1a3000; i<(0x1a3000+40960); i+=PAGE_SIZE)
- printk("address=0x%x vseg=%d pte=0x%x\n", (unsigned int) i,
- (int) get_segmap(i), (unsigned int) get_pte(i));
- halt();
-#endif
+ return start_mem;
+}
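For reference, the sixteen `protection_map` entries that paging_init() fills in above are indexed by the low four `vm_flags` bits (`VM_READ`, `VM_WRITE`, `VM_EXEC`, `VM_SHARED`); the private `__P` half withholds the hardware write bit so that a store to a private writable mapping faults and can be resolved as copy-on-write. A sketch of the lookup as the generic mmap code of this era performs it (`vma_prot` is an illustrative name, not from this patch):

    /* Illustrative: translate a vma's flags into a pgprot_t via the
     * table paging_init() just initialized. */
    static pgprot_t vma_prot(unsigned long vm_flags)
    {
            /* VM_READ, VM_WRITE, VM_EXEC, VM_SHARED occupy bits 0-3 */
            return protection_map[vm_flags & 0x0f];
    }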
- b=PGDIR_ALIGN(start_mem)>>18;
- c= (char *)0x0;
+extern unsigned long sun4c_test_wp(unsigned long);
+extern void srmmu_test_wp(void);
- printk("mapping kernel in all contexts...\n");
+void mem_init(unsigned long start_mem, unsigned long end_mem)
+{
+ int codepages = 0;
+ int reservedpages = 0;
+ int datapages = 0;
+ unsigned long tmp2, addr;
+ extern char etext;
- for(a=0; a<b; a++)
- {
- for(i=0; i<num_contexts; i++)
- {
- /* map the kernel virt_addrs */
- (*(romvec->pv_setctxt))(i, (char *) c, a);
- }
- c += 0x40000;
- }
-
- /* Ok, since now mapped in all contexts, we can free up
- * context zero to be used amongst user processes.
- */
-
- /* free context 0 here TODO */
-
- /* invalidate all user pages and initialize the pte struct
- * for userland. TODO
- */
-
- /* Make the kernel text unwritable and cacheable, the prom
- * loaded our text as writable, only sneaky sunos kernels need
- * self-modifying code.
- */
-
- a= (unsigned long) &etext;
- mask=~(PTE_NC|PTE_W); /* make cacheable + not writable */
-
- /* must do for every segment since kernel uses all contexts
- * and unlike some sun kernels I know of, we can't hard wire
- * context 0 just for the kernel, that is unnecessary.
- */
-
- for(i=0; i<8; i++)
- {
- b=PAGE_ALIGN((unsigned long) &trapbase);
-
- switch_to_context(i);
-
- for(;b<a; b+=4096)
- {
- put_pte(b, (get_pte(b) & mask));
- }
- }
-
- invalidate(); /* flush the virtual address cache */
-
- printk("\nCurrently in context - ");
- for(i=0; i<num_contexts; i++)
- {
- switch_to_context(i);
- printk("%d ", (int) i);
- }
- printk("\n");
+ end_mem &= PAGE_MASK;
+ high_memory = end_mem;
- switch_to_context(0);
+ start_mem = PAGE_ALIGN(start_mem);
- invalidate();
- return start_mem;
-}
+ addr = PAGE_OFFSET;
+ while(addr < start_mem) {
+ mem_map[MAP_NR(addr)] = MAP_PAGE_RESERVED;
+ addr += PAGE_SIZE;
+ }
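Note that the reservation loop above now starts at PAGE_OFFSET rather than 0: the kernel is no longer mapped at virtual address zero, so `MAP_NR()` must rebase a virtual address before indexing `mem_map[]`. Spelled out (the real macro lives in the arch headers; this expansion is an assumption for illustration):

    /* Assumed expansion: page-frame index of a kernel virtual address. */
    #define MAP_NR(addr)  (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)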
-void mem_init(unsigned long start_mem, unsigned long end_mem)
-{
- unsigned long start_low_mem = PAGE_SIZE;
- int codepages = 0;
- int reservedpages = 0;
- int datapages = 0;
- int i = 0;
- unsigned long tmp, limit, tmp2, addr;
- extern char etext;
-
- end_mem &= PAGE_MASK;
- high_memory = end_mem;
-
- start_low_mem = PAGE_ALIGN(start_low_mem);
- start_mem = PAGE_ALIGN(start_mem);
-
- for(i = 0; sp_banks[i].num_bytes != 0; i++) {
- tmp = sp_banks[i].base_addr;
- limit = (sp_banks[i].base_addr + sp_banks[i].num_bytes);
- if(tmp<start_mem) {
- if(limit>start_mem)
- tmp = start_mem;
- else continue;
- }
-
- while(tmp<limit) {
- mem_map[MAP_NR(tmp)] = 0;
- tmp += PAGE_SIZE;
- }
- if(sp_banks[i+1].num_bytes != 0)
- while(tmp < sp_banks[i+1].base_addr) {
- mem_map[MAP_NR(tmp)] = MAP_PAGE_RESERVED;
- tmp += PAGE_SIZE;
- }
- }
+ for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE)
+ mem_map[MAP_NR(addr)] = 0;
#ifdef CONFIG_SCSI
- scsi_mem_init(high_memory);
+ scsi_mem_init(high_memory);
#endif
- for (addr = 0; addr < high_memory; addr += PAGE_SIZE) {
- if(mem_map[MAP_NR(addr)]) {
- if (addr < (unsigned long) &etext)
- codepages++;
- else if(addr < start_mem)
- datapages++;
- else
- reservedpages++;
- continue;
- }
- mem_map[MAP_NR(addr)] = 1;
- free_page(addr);
- }
-
- tmp2 = nr_free_pages << PAGE_SHIFT;
-
- printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
- tmp2 >> 10,
- high_memory >> 10,
- codepages << (PAGE_SHIFT-10),
- reservedpages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10));
+ for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
+ if(mem_map[MAP_NR(addr)]) {
+ if (addr < (unsigned long) &etext)
+ codepages++;
+ else if(addr < start_mem)
+ datapages++;
+ else
+ reservedpages++;
+ continue;
+ }
+ mem_map[MAP_NR(addr)] = 1;
+ free_page(addr);
+ }
+
+ tmp2 = nr_free_pages << PAGE_SHIFT;
+
+ printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
+ tmp2 >> 10,
+ (high_memory - PAGE_OFFSET) >> 10,
+ codepages << (PAGE_SHIFT-10),
+ reservedpages << (PAGE_SHIFT-10),
+ datapages << (PAGE_SHIFT-10));
+
+/* Heh, test write protection just like the i386, this is bogus but it is
+ * fun to do ;)
+ */
+ switch(sparc_cpu_model) {
+ case sun4c:
+ start_mem = sun4c_test_wp(start_mem);
+ break;
+ case sun4m:
+ case sun4d:
+ case sun4e:
+ srmmu_test_wp();
+ break;
+ default:
+ printk("mem_init: Could not test WP bit on this machine.\n");
+ printk("mem_init: sparc_cpu_model = %d\n", sparc_cpu_model);
+ printk("mem_init: Halting...\n");
+ halt();
+ };
+
+#ifdef DEBUG_MEMINIT
+ printk("Breaker breaker...Roger roger.... Over and out...\n");
+#endif
+ invalidate();
- invalidate();
- return;
+ return;
}
void si_meminfo(struct sysinfo *val)
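The write-protection test dispatched at the end of mem_init() mirrors the i386 WP-bit check: mark a page read-only, store to it from kernel mode, and see whether a protection fault arrives. The MMU-specific implementations (`sun4c_test_wp`, `srmmu_test_wp`) live in other files of this patch; the sketch below only illustrates the idea, and `set_pte_prot()` plus the fault-handler flag are hypothetical:

    /* Hypothetical illustration of the WP probe.  The fault handler is
     * assumed to set wp_works_ok when it sees a protection fault on
     * this address instead of treating it as a kernel bug. */
    static volatile int wp_works_ok = 0;

    static void generic_test_wp(unsigned long page)
    {
            set_pte_prot(page, PAGE_READONLY);   /* hypothetical helper    */
            *(volatile char *) page = 0;         /* faults iff WP honoured */
            set_pte_prot(page, PAGE_KERNEL);
            if (!wp_works_ok)
                    printk("This MMU ignores write protection in kernel mode.\n");
    }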