patch-2.4.0-test12 linux/arch/mips64/kernel/smp.c
- Lines: 88
- Date: Tue Nov 28 21:42:04 2000
- Orig file: v2.4.0-test11/linux/arch/mips64/kernel/smp.c
- Orig date: Tue Oct 31 12:42:26 2000
diff -u --recursive --new-file v2.4.0-test11/linux/arch/mips64/kernel/smp.c linux/arch/mips64/kernel/smp.c
@@ -168,7 +168,7 @@
return 0;
}
-void smp_call_function_interrupt(void)
+extern void smp_call_function_interrupt(int irq, void *d, struct pt_regs *r)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
@@ -205,9 +205,29 @@
_flush_tlb_mm((struct mm_struct *)mm);
}
+/*
+ * The following tlb flush calls are invoked when old translations are
+ * being torn down, or pte attributes are changing. For single threaded
+ * address spaces, a new context is obtained on the current cpu, and tlb
+ * contexts on other cpus are invalidated to force a new context allocation
+ * at switch_mm time, should the mm ever be used on other cpus. For
+ * multithreaded address spaces, intercpu interrupts have to be sent.
+ * Another case where intercpu interrupts are required is when the target
+ * mm might be active on another cpu (e.g. debuggers doing the flushes on
+ * behalf of debuggees, kswapd stealing pages from another process, etc.).
+ * Kanoj 07/00.
+ */
+
void flush_tlb_mm(struct mm_struct *mm)
{
- smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+ } else {
+ int i;
+ for (i = 0; i < smp_num_cpus; i++)
+ if (smp_processor_id() != i)
+ CPU_CONTEXT(i, mm) = 0;
+ }
_flush_tlb_mm(mm);
}
@@ -227,12 +247,19 @@
void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
- struct flush_tlb_data fd;
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ struct flush_tlb_data fd;
- fd.mm = mm;
- fd.addr1 = start;
- fd.addr2 = end;
- smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+ fd.mm = mm;
+ fd.addr1 = start;
+ fd.addr2 = end;
+ smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+ } else {
+ int i;
+ for (i = 0; i < smp_num_cpus; i++)
+ if (smp_processor_id() != i)
+ CPU_CONTEXT(i, mm) = 0;
+ }
_flush_tlb_range(mm, start, end);
}
@@ -245,11 +272,18 @@
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
- struct flush_tlb_data fd;
+ if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+ struct flush_tlb_data fd;
- fd.vma = vma;
- fd.addr1 = page;
- smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+ fd.vma = vma;
+ fd.addr1 = page;
+ smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+ } else {
+ int i;
+ for (i = 0; i < smp_num_cpus; i++)
+ if (smp_processor_id() != i)
+ CPU_CONTEXT(i, vma->vm_mm) = 0;
+ }
_flush_tlb_page(vma, page);
}
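
The comment added in the second hunk describes the whole trick: when the mm is single threaded and is the current mm, no IPI is needed; clearing the per-cpu TLB context on the other cpus is enough, because those cpus will be forced to allocate a fresh context the next time they switch to this mm. Below is a small, self-contained user-space sketch of that mechanism. All names in it (NCPUS, ctx[], switch_to_mm(), flush_mm(), next_asid) are simplified stand-ins, not the kernel's CPU_CONTEXT()/get_new_mmu_context() code; it only illustrates why zeroing a remote context makes stale TLB entries on that cpu harmless.

/*
 * Toy model of lazy TLB-context invalidation: a flush either "sends IPIs"
 * or just zeroes the remote per-cpu contexts, and a zero context forces a
 * fresh ASID at switch time, so old TLB entries can never hit again.
 */
#include <stdio.h>

#define NCPUS 4

struct mm {
	unsigned long ctx[NCPUS];	/* per-cpu TLB context (ASID); 0 = none */
	int users;			/* analogue of mm->mm_users */
};

static unsigned long next_asid = 1;

/* Analogue of the switch_mm()-time check: a cleared context means the cpu
 * must take a new ASID before running this mm again. */
static void switch_to_mm(struct mm *mm, int cpu)
{
	if (mm->ctx[cpu] == 0)
		mm->ctx[cpu] = next_asid++;
	printf("cpu%d now runs mm with context %lu\n", cpu, mm->ctx[cpu]);
}

/* Analogue of the patched flush_tlb_mm(): IPI only when another cpu may be
 * using the mm right now; otherwise just invalidate the remote contexts. */
static void flush_mm(struct mm *mm, int this_cpu, int current_mm_is_mm)
{
	int i;

	if (mm->users != 1 || !current_mm_is_mm) {
		printf("cpu%d: would send flush IPIs\n", this_cpu);
	} else {
		for (i = 0; i < NCPUS; i++)
			if (i != this_cpu)
				mm->ctx[i] = 0;	/* force new ASID later */
		printf("cpu%d: cleared remote contexts, no IPIs\n", this_cpu);
	}
	/* the local flush (_flush_tlb_mm) would follow here in the kernel */
}

int main(void)
{
	struct mm mm = { .users = 1 };

	switch_to_mm(&mm, 0);	/* task starts on cpu0, gets context 1 */
	switch_to_mm(&mm, 2);	/* task migrates to cpu2, gets context 2 */
	flush_mm(&mm, 2, 1);	/* single threaded: just clear ctx[0] */
	switch_to_mm(&mm, 0);	/* back on cpu0: fresh context, so stale
				   entries tagged with context 1 never hit */
	return 0;
}

The multithreaded and debugger/kswapd cases still take the IPI path, since another cpu may be actively using the old translations at the moment of the flush.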