patch-2.4.0-test6 linux/arch/sh/mm/cache.c
- Lines: 167
- Date: Wed Aug 9 13:59:04 2000
- Orig file: v2.4.0-test5/linux/arch/sh/mm/cache.c
- Orig date: Thu Jul 27 17:37:59 2000
diff -u --recursive --new-file v2.4.0-test5/linux/arch/sh/mm/cache.c linux/arch/sh/mm/cache.c
@@ -207,6 +207,9 @@
* compares the cache tag, and if it does not match, nothing
* happens. (We can avoid flushing other caches.)
*
+ * NOTE: We can use the A-bit feature here, because we have a valid
+ * entry in the TLB (at least in the UTLB), since dcache_wback_range
+ * is called before this function.
*/
	for (v = start; v < end; v += L1_CACHE_BYTES) {
		addr = CACHE_IC_ADDRESS_ARRAY | (v & CACHE_IC_ENTRY_MASK)
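The loop above writes to the I-cache address array in associative mode: with the A-bit set, the hardware compares the stored tag against the supplied one and invalidates the line only on a match. A minimal sketch of one such purge, assuming the A-bit value (0x8) and tag layout used by the D-cache code removed later in this patch:

	/* Sketch only, not part of the patch: associatively purge the
	 * I-cache line (if any) holding virtual address v. */
	unsigned long addr, data;

	addr = CACHE_IC_ADDRESS_ARRAY | (v & CACHE_IC_ENTRY_MASK) | 0x8; /* A-bit */
	data = v & 0xfffffc00;	/* tag to compare; Update=0, Valid=0 on a hit */
	ctrl_outl(data, addr);	/* hardware compares tags; a miss is a no-op */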
@@ -225,8 +228,12 @@
*/
void flush_icache_range(unsigned long start, unsigned long end)
{
+	unsigned long flags;
+
+	save_and_cli(flags);
	dcache_wback_range(start, end);
	icache_purge_range(start, end);
+	restore_flags(flags);
}
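The new save_and_cli/restore_flags pair matters because the A-bit purge in icache_purge_range depends on the UTLB entry that dcache_wback_range has just established; an interrupt taken in between could evict it. A hypothetical caller (dst, insns and len are illustrative, not from the patch):

	/* Usage sketch: after storing new instructions, write them back
	 * from the D-cache and purge stale I-cache lines before any
	 * code in that range is executed. */
	memcpy(dst, insns, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);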
/*
@@ -238,11 +245,6 @@
{
	unsigned long phys, addr, data, i;
-	/*
-	 * Alas, we don't know where the virtual address is,
-	 * So, we can't use icache_purge_range().
-	 */
-
	/* Physical address of this page */
	phys = (pg - mem_map)*PAGE_SIZE + __MEMORY_START;
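To make the arithmetic concrete (illustrative values, not from the patch): (pg - mem_map) is the page frame index, so with PAGE_SIZE = 4096 and a RAM base of __MEMORY_START = 0x0c000000, page frame 3 gives phys = 3*4096 + 0x0c000000 = 0x0c003000.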
@@ -259,6 +261,30 @@
	back_to_P1();
}
+/*
+ * Write back & invalidate the D-cache of the page.
+ * (To avoid "alias" issues)
+ */
+void flush_dcache_page(struct page *pg)
+{
+	unsigned long phys, addr, data, i;
+
+	/* Physical address of this page */
+	phys = (pg - mem_map)*PAGE_SIZE + __MEMORY_START;
+
+	jump_to_P2();
+	/* Loop over all the D-cache entries */
+	for (i = 0; i < CACHE_OC_NUM_ENTRIES; i++) {
+		addr = CACHE_OC_ADDRESS_ARRAY | (i << CACHE_OC_ENTRY_SHIFT);
+		data = ctrl_inl(addr);
+		if ((data & CACHE_VALID) && (data & PAGE_MASK) == phys) {
+			data &= ~(CACHE_VALID|CACHE_UPDATED);
+			ctrl_outl(data, addr);
+		}
+	}
+	back_to_P1();
+}
+
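A hypothetical use of the new function (src is illustrative; page_address() returns the kernel mapping of the page): after the kernel writes a page through its own P1 mapping, the dirty lines must leave the D-cache so that a differently indexed user-space alias of the same physical page does not read stale data.

	/* Sketch only, not part of the patch. */
	memcpy(page_address(pg), src, PAGE_SIZE);	/* kernel writes via P1 */
	flush_dcache_page(pg);				/* write back + invalidate */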
void flush_cache_all(void)
{
	unsigned long addr, data, i;
@@ -298,56 +324,64 @@
	flush_cache_all();
}
+/*
+ * Write back and invalidate the D-cache.
+ *
+ * START, END: Virtual Address (U0 address)
+ *
+ * NOTE: We need to flush the cache lines for the _physical_ page.
+ * Flushing the lines for the U0 address alone isn't enough:
+ * the P1 mapping may hold aliases and must be flushed too.
+ */
void flush_cache_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end)
{
	/*
-	 * Calling
-	 * dcache_flush_range(start, end);
-	 * is not good for the purpose of this function. That is,
-	 * flushing cache lines indexed by the virtual address is not
-	 * sufficient.
-	 *
-	 * Instead, we need to flush the relevant cache lines which
-	 * hold the data of the corresponding physical memory, as we
-	 * have "alias" issues.
-	 *
-	 * This is needed because the kernel accesses the memory through
-	 * P1-area (and/or U0-area) and user-space accesses through U0-area.
-	 * And P1-area and U0-area may use different cache lines for
-	 * the same physical memory.
+	 * We could call flush_cache_page for each page in this range,
+	 * but that would be inefficient (it scans the whole cache every time).
	 *
-	 * If we called dcache_flush_range(), the lines for the P1-area
-	 * could remain in the cache, unflushed.
+	 * We can't use the A-bit trick either, as there may be no
+	 * valid entry in the TLB.
	 */
-	unsigned long addr, data, v;
-
-	start &= ~(L1_CACHE_BYTES-1);
-	jump_to_P2();
-
-	for (v = start; v < end; v += L1_CACHE_BYTES) {
-		addr = CACHE_OC_ADDRESS_ARRAY |
-			(v & CACHE_OC_ENTRY_PHYS_MASK) | 0x8 /* A-bit */;
-		data = (v & 0xfffffc00); /* Update=0, Valid=0 */
-
-		/* Try all the cases for aliases */
-		ctrl_outl(data, addr);
-		ctrl_outl(data, addr | 0x1000);
-		ctrl_outl(data, addr | 0x2000);
-		ctrl_outl(data, addr | 0x3000);
-	}
-	back_to_P1();
+	flush_cache_all();
}
+/*
+ * Write back and invalidate the D-cache lines for the page.
+ *
+ * ADDR: Virtual Address (U0 address)
+ *
+ * NOTE: We need to flush the cache lines for the _physical_ page.
+ * Flushing the lines for the U0 address alone isn't enough:
+ * the P1 mapping may hold aliases and must be flushed too.
+ */
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
{
-	/* XXX: Umm... this flush out all the cache lines. Any improvement? */
-	flush_cache_range(vma->vm_mm, addr, addr+PAGE_SIZE);
+	pgd_t *dir;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t entry;
+	unsigned long phys;
+	struct page *pg;
+
+	dir = pgd_offset(vma->vm_mm, addr);
+	pmd = pmd_offset(dir, addr);
+	if (pmd_none(*pmd))
+		return;
+	if (pmd_bad(*pmd))
+		return;
+	pte = pte_offset(pmd, addr);
+	entry = *pte;
+	if (pte_none(entry) || !pte_present(entry))
+		return;
+	phys = pte_val(entry) & PAGE_MASK;
+	pg = virt_to_page(__va(phys));
+	flush_dcache_page(pg);
}
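The last two steps simply invert the phys = (pg - mem_map)*PAGE_SIZE + __MEMORY_START formula used earlier in this file; assuming the port's single contiguous mem_map, the lookup is equivalent to:

	/* Sketch only: recover the struct page for a physical address. */
	pg = mem_map + ((phys - __MEMORY_START) >> PAGE_SHIFT);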
/*
* After accessing the memory from kernel space (P1-area), we need to
- * write back the cache line, to avoid "alias" issues.
+ * write back the cache line to maintain DMA coherency.
*
* We search the D-cache to see if we have entries corresponding to
* the page, and if found, write them back.