patch-2.4.22 linux-2.4.22/arch/mips/mm/tlb-r4k.c

diff -urN linux-2.4.21/arch/mips/mm/tlb-r4k.c linux-2.4.22/arch/mips/mm/tlb-r4k.c
@@ -44,66 +44,64 @@
 	printk("[tlball]");
 #endif
 
-	__save_and_cli(flags);
+	local_irq_save(flags);
 	/* Save old context and create impossible VPN2 value */
-	old_ctx = (get_entryhi() & 0xff);
-	set_entrylo0(0);
-	set_entrylo1(0);
+	old_ctx = (read_c0_entryhi() & 0xff);
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
 	BARRIER;
 
-	entry = get_wired();
+	entry = read_c0_wired();
 
 	/* Blast 'em all away. */
-	while (entry < mips_cpu.tlbsize) {
+	while (entry < current_cpu_data.tlbsize) {
 		/*
 		 * Make sure all entries differ.  If they're not different
 		 * MIPS32 will take revenge ...
 		 */
-		set_entryhi(KSEG0 + entry*0x2000);
-		set_index(entry);
+		write_c0_entryhi(KSEG0 + entry*0x2000);
+		write_c0_index(entry);
 		BARRIER;
 		tlb_write_indexed();
 		BARRIER;
 		entry++;
 	}
 	BARRIER;
-	set_entryhi(old_ctx);
-	__restore_flags(flags);
+	write_c0_entryhi(old_ctx);
+	local_irq_restore(flags);
 }
 
 void local_flush_tlb_mm(struct mm_struct *mm)
 {
-	if (mm->context != 0) {
-		unsigned long flags;
+	int cpu = smp_processor_id();
 
+	if (cpu_context(cpu, mm) != 0) {
 #ifdef DEBUG_TLB
-		printk("[tlbmm<%d>]", mm->context);
+		printk("[tlbmm<%d>]", cpu_context(cpu, mm));
 #endif
-		__save_and_cli(flags);
-		get_new_mmu_context(mm, smp_processor_id());
-		if (mm == current->active_mm)
-			set_entryhi(mm->context & 0xff);
-		__restore_flags(flags);
+		drop_mmu_context(mm,cpu);
 	}
 }
 
 void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end)
 {
-	if (mm->context != 0) {
+	int cpu = smp_processor_id();
+
+	if (cpu_context(cpu, mm) != 0) {
 		unsigned long flags;
 		int size;
 
 #ifdef DEBUG_TLB
-		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & 0xff),
-		       start, end);
+		printk("[tlbrange<%02x,%08lx,%08lx>]",
+		       cpu_asid(cpu, mm), start, end);
 #endif
-		__save_and_cli(flags);
+		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		size = (size + 1) >> 1;
-		if (size <= mips_cpu.tlbsize/2) {
-			int oldpid = (get_entryhi() & 0xff);
-			int newpid = (mm->context & 0xff);
+		if (size <= current_cpu_data.tlbsize/2) {
+			int oldpid = read_c0_entryhi() & ASID_MASK;
+			int newpid = cpu_asid(cpu, mm);
 
 			start &= (PAGE_MASK << 1);
 			end += ((PAGE_SIZE << 1) - 1);
@@ -111,63 +109,64 @@
 			while (start < end) {
 				int idx;
 
-				set_entryhi(start | newpid);
+				write_c0_entryhi(start | newpid);
 				start += (PAGE_SIZE << 1);
 				BARRIER;
 				tlb_probe();
 				BARRIER;
-				idx = get_index();
-				set_entrylo0(0);
-				set_entrylo1(0);
+				idx = read_c0_index();
+				write_c0_entrylo0(0);
+				write_c0_entrylo1(0);
 				if (idx < 0)
 					continue;
 				/* Make sure all entries differ. */
-				set_entryhi(KSEG0 + idx*0x2000);
+				write_c0_entryhi(KSEG0 + idx*0x2000);
 				BARRIER;
 				tlb_write_indexed();
 				BARRIER;
 			}
-			set_entryhi(oldpid);
+			write_c0_entryhi(oldpid);
 		} else {
-			get_new_mmu_context(mm, smp_processor_id());
-			if (mm == current->active_mm)
-				set_entryhi(mm->context & 0xff);
+			drop_mmu_context(mm, cpu);
 		}
-		__restore_flags(flags);
+		local_irq_restore(flags);
 	}
 }
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (!vma || vma->vm_mm->context != 0) {
+	int cpu = smp_processor_id();
+
+	if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
 		unsigned long flags;
 		int oldpid, newpid, idx;
 
 #ifdef DEBUG_TLB
-		printk("[tlbpage<%d,%08lx>]", vma->vm_mm->context, page);
+		printk("[tlbpage<%d,%08lx>]", cpu_context(cpu, vma->vm_mm),
+		       page);
 #endif
-		newpid = (vma->vm_mm->context & 0xff);
+		newpid = cpu_asid(cpu, vma->vm_mm);
 		page &= (PAGE_MASK << 1);
-		__save_and_cli(flags);
-		oldpid = (get_entryhi() & 0xff);
-		set_entryhi(page | newpid);
+		local_irq_save(flags);
+		oldpid = (read_c0_entryhi() & 0xff);
+		write_c0_entryhi(page | newpid);
 		BARRIER;
 		tlb_probe();
 		BARRIER;
-		idx = get_index();
-		set_entrylo0(0);
-		set_entrylo1(0);
+		idx = read_c0_index();
+		write_c0_entrylo0(0);
+		write_c0_entrylo1(0);
 		if(idx < 0)
 			goto finish;
 		/* Make sure all entries differ. */
-		set_entryhi(KSEG0+idx*0x2000);
+		write_c0_entryhi(KSEG0+idx*0x2000);
 		BARRIER;
 		tlb_write_indexed();
 
 	finish:
 		BARRIER;
-		set_entryhi(oldpid);
-		__restore_flags(flags);
+		write_c0_entryhi(oldpid);
+		local_irq_restore(flags);
 	}
 }
 
@@ -175,8 +174,7 @@
  * updates the TLB with the new pte(s), and another which also checks
  * for the R4k "end of page" hardware bug and does the needy.
  */
-void update_mmu_cache(struct vm_area_struct * vma, unsigned long address,
-		      pte_t pte)
+void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 {
 	unsigned long flags;
 	pgd_t *pgdp;
@@ -190,29 +188,36 @@
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = get_entryhi() & 0xff;
+	pid = read_c0_entryhi() & ASID_MASK;
 
 #ifdef DEBUG_TLB
-	if((pid != (vma->vm_mm->context & 0xff)) || (vma->vm_mm->context == 0)) {
-		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
-		       (int) (vma->vm_mm->context & 0xff), pid);
+	if ((pid != cpu_asid(cpu, vma->vm_mm)) ||
+	    (cpu_context(vma->vm_mm) == 0)) {
+		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d "
+		       "tlbpid=%d\n", (int) (cpu_asid(cpu, vma->vm_mm)), pid);
 	}
 #endif
 
-	__save_and_cli(flags);
+	local_irq_save(flags);
 	address &= (PAGE_MASK << 1);
-	set_entryhi(address | (pid));
+	write_c0_entryhi(address | pid);
 	pgdp = pgd_offset(vma->vm_mm, address);
 	BARRIER;
 	tlb_probe();
 	BARRIER;
 	pmdp = pmd_offset(pgdp, address);
-	idx = get_index();
+	idx = read_c0_index();
 	ptep = pte_offset(pmdp, address);
 	BARRIER;
-	set_entrylo0(pte_val(*ptep++) >> 6);
-	set_entrylo1(pte_val(*ptep) >> 6);
-	set_entryhi(address | (pid));
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+	write_c0_entrylo0(ptep->pte_high);
+	ptep++;
+	write_c0_entrylo1(ptep->pte_high);
+#else
+	write_c0_entrylo0(pte_val(*ptep++) >> 6);
+	write_c0_entrylo1(pte_val(*ptep) >> 6);
+#endif
+	write_c0_entryhi(address | pid);
 	BARRIER;
 	if (idx < 0) {
 		tlb_write_random();
@@ -220,9 +225,9 @@
 		tlb_write_indexed();
 	}
 	BARRIER;
-	set_entryhi(pid);
+	write_c0_entryhi(pid);
 	BARRIER;
-	__restore_flags(flags);
+	local_irq_restore(flags);
 }
 
 #if 0
@@ -235,23 +240,23 @@
 	pte_t *ptep;
 	int idx;
 
-	__save_and_cli(flags);
+	local_irq_save(flags);
 	address &= (PAGE_MASK << 1);
-	set_entryhi(address | (get_entryhi() & 0xff));
+	write_c0_entryhi(address | (read_c0_entryhi() & 0xff));
 	pgdp = pgd_offset(vma->vm_mm, address);
 	tlb_probe();
 	pmdp = pmd_offset(pgdp, address);
-	idx = get_index();
+	idx = read_c0_index();
 	ptep = pte_offset(pmdp, address);
-	set_entrylo0(pte_val(*ptep++) >> 6);
-	set_entrylo1(pte_val(*ptep) >> 6);
+	write_c0_entrylo0(pte_val(*ptep++) >> 6);
+	write_c0_entrylo1(pte_val(*ptep) >> 6);
 	BARRIER;
 	if (idx < 0)
 		tlb_write_random();
 	else
 		tlb_write_indexed();
 	BARRIER;
-	__restore_flags(flags);
+	local_irq_restore(flags);
 }
 #endif
 
@@ -263,27 +268,27 @@
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	__save_and_cli(flags);
+	local_irq_save(flags);
 	/* Save old context and create impossible VPN2 value */
-	old_ctx = get_entryhi() & 0xff;
-	old_pagemask = get_pagemask();
-	wired = get_wired();
-	set_wired(wired + 1);
-	set_index(wired);
-	BARRIER;
-	set_pagemask(pagemask);
-	set_entryhi(entryhi);
-	set_entrylo0(entrylo0);
-	set_entrylo1(entrylo1);
+	old_ctx = read_c0_entryhi() & 0xff;
+	old_pagemask = read_c0_pagemask();
+	wired = read_c0_wired();
+	write_c0_wired(wired + 1);
+	write_c0_index(wired);
+	BARRIER;
+	write_c0_pagemask(pagemask);
+	write_c0_entryhi(entryhi);
+	write_c0_entrylo0(entrylo0);
+	write_c0_entrylo1(entrylo1);
 	BARRIER;
 	tlb_write_indexed();
 	BARRIER;
 
-	set_entryhi(old_ctx);
+	write_c0_entryhi(old_ctx);
 	BARRIER;
-	set_pagemask(old_pagemask);
+	write_c0_pagemask(old_pagemask);
 	local_flush_tlb_all();
-	__restore_flags(flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -303,58 +308,60 @@
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	__save_and_cli(flags);
+	local_irq_save(flags);
 	/* Save old context and create impossible VPN2 value */
-	old_ctx = get_entryhi() & 0xff;
-	old_pagemask = get_pagemask();
-	wired = get_wired();
+	old_ctx = read_c0_entryhi() & 0xff;
+	old_pagemask = read_c0_pagemask();
+	wired = read_c0_wired();
 	if (--temp_tlb_entry < wired) {
 		printk(KERN_WARNING "No TLB space left for add_temporary_entry\n");
 		ret = -ENOSPC;
 		goto out;
 	}
 
-	set_index(temp_tlb_entry);
+	write_c0_index(temp_tlb_entry);
 	BARRIER;
-	set_pagemask(pagemask);
-	set_entryhi(entryhi);
-	set_entrylo0(entrylo0);
-	set_entrylo1(entrylo1);
+	write_c0_pagemask(pagemask);
+	write_c0_entryhi(entryhi);
+	write_c0_entrylo0(entrylo0);
+	write_c0_entrylo1(entrylo1);
 	BARRIER;
 	tlb_write_indexed();
 	BARRIER;
 
-	set_entryhi(old_ctx);
+	write_c0_entryhi(old_ctx);
 	BARRIER;
-	set_pagemask(old_pagemask);
+	write_c0_pagemask(old_pagemask);
 out:
-	__restore_flags(flags);
+	local_irq_restore(flags);
 	return ret;
 }
 
 static void __init probe_tlb(unsigned long config)
 {
-	unsigned int prid, config1;
+	unsigned int reg;
 
-	prid = read_32bit_cp0_register(CP0_PRID) & 0xff00;
-	if (prid == PRID_IMP_RM7000 || !(config & (1 << 31)))
+	reg = read_c0_prid() & 0xff00;
+	if (reg == PRID_IMP_RM7000 || !(config & (1 << 31)))
 		/*
-		 * Not a MIPS32 complianant CPU.  Config 1 register not
+		 * Not a MIPS32 compliant CPU.  Config 1 register not
 		 * supported, we assume R4k style.  Cpu probing already figured
 		 * out the number of tlb entries.
 		 */
 		return;
 
-	config1 = read_mips32_cp0_config1();
+#if defined(CONFIG_CPU_MIPS32) || defined (CONFIG_CPU_MIPS64)
+	reg = read_c0_config1();
 	if (!((config >> 7) & 3))
 		panic("No MMU present");
 	else
-		mips_cpu.tlbsize = ((config1 >> 25) & 0x3f) + 1;
+		current_cpu_data.tlbsize = ((reg >> 25) & 0x3f) + 1;
+#endif
 }
 
 void __init r4k_tlb_init(void)
 {
-	u32 config = read_32bit_cp0_register(CP0_CONFIG);
+	u32 config = read_c0_config();
 
 	/*
 	 * You should never change this register:
@@ -364,16 +371,15 @@
 	 *     be set for 4kb pages.
 	 */
 	probe_tlb(config);
-	set_pagemask(PM_4K);
-	write_32bit_cp0_register(CP0_WIRED, 0);
-	temp_tlb_entry = mips_cpu.tlbsize - 1;
+	write_c0_pagemask(PM_4K);
+	write_c0_wired(0);
+	temp_tlb_entry = current_cpu_data.tlbsize - 1;
 	local_flush_tlb_all();
 
-	if ((mips_cpu.options & MIPS_CPU_4KEX)
-	    && (mips_cpu.options & MIPS_CPU_4KTLB)) {
-		if (mips_cpu.cputype == CPU_NEVADA)
+	if (cpu_has_4kex && cpu_has_4ktlb) {
+		if (current_cpu_data.cputype == CPU_NEVADA)
 			memcpy((void *)KSEG0, &except_vec0_nevada, 0x80);
-		else if (mips_cpu.cputype == CPU_R4600)
+		else if (current_cpu_data.cputype == CPU_R4600)
 			memcpy((void *)KSEG0, &except_vec0_r4600, 0x80);
 		else
 			memcpy((void *)KSEG0, &except_vec0_r4000, 0x80);
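
The bulk of this patch is a mechanical conversion: the old get_*()/set_*()
CP0 helpers become the generic read_c0_*()/write_c0_*() accessors, and
__save_and_cli()/__restore_flags() become local_irq_save()/local_irq_restore().
A minimal, hypothetical sketch of what such accessors amount to (the real
macros live in include/asm-mips/mipsregs.h and additionally deal with
register selects and coprocessor hazards):

	/* Simplified sketch only; EntryHi is CP0 register $10. */
	#define read_c0_entryhi()					\
	({	unsigned int __res;					\
		__asm__ __volatile__("mfc0\t%0, $10" : "=r" (__res));	\
		__res;							\
	})

	#define write_c0_entryhi(val)					\
		__asm__ __volatile__("mtc0\t%z0, $10" : : "Jr" (val))

The calling pattern in the flush routines is unchanged: interrupts are
disabled with local_irq_save(flags), the current ASID is saved from EntryHi,
the TLB is programmed, then EntryHi and the interrupt state are restored.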

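The second substantive change is that mm->context is no longer read
directly: the flush routines go through cpu_context(), cpu_asid() and
drop_mmu_context(), which keep the ASID handling per CPU. A rough,
hypothetical sketch of these helpers, mirroring the open-coded sequence
removed from local_flush_tlb_mm() above (the real definitions are in
include/asm-mips/mmu_context.h and differ in detail; the ASID width
depends on the CPU, and SMP keeps one context word per processor):

	#define ASID_MASK		0xff	/* 8-bit ASIDs on R4k-style TLBs */
	#define cpu_context(cpu, mm)	((mm)->context)
	#define cpu_asid(cpu, mm)	(cpu_context(cpu, mm) & ASID_MASK)

	/* Allocate a fresh ASID for this mm; reload EntryHi if it is live. */
	static inline void drop_mmu_context(struct mm_struct *mm, unsigned cpu)
	{
		get_new_mmu_context(mm, cpu);
		if (mm == current->active_mm)
			write_c0_entryhi(cpu_asid(cpu, mm));
	}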
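
Finally, probe_tlb() now reads the TLB size from Config1 only when the
kernel is built for a MIPS32/MIPS64 CPU: bit 31 of Config indicates that
Config1 is implemented, and Config1 bits 30:25 hold the number of TLB
entries minus one. Restated on its own (illustrative helper name; the
field positions are those used by the code above):

	/* Returns the TLB entry count, or -1 if Config1 is not implemented
	 * and the value from CPU probing has to be kept instead. */
	static inline int mips32_tlb_entries(void)
	{
		unsigned int config = read_c0_config();

		if (!(config & (1 << 31)))
			return -1;
		return ((read_c0_config1() >> 25) & 0x3f) + 1;
	}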