path: root/include/asm-powerpc/cacheflush.h
author    David Gibson <david@gibson.dropbear.id.au>  2005-11-10 11:50:16 +1100
committer Paul Mackerras <paulus@samba.org>           2005-11-10 13:09:22 +1100
commit    26ef5c09576496dfd08d2b36ec1d08a6f917a0eb (patch)
tree      6a0bc875966eb00dc04dc2fdf7deeac96262698b /include/asm-powerpc/cacheflush.h
parent    e130bedb7ce718a8eb6b56a3806b96281f618111 (diff)
[PATCH] powerpc: Merge cacheflush.h and cache.h
The ppc32 and ppc64 versions of cacheflush.h were almost identical. The two versions of cache.h are fairly similar, except for a bunch of register definitions in the ppc32 version which probably belong better elsewhere. This patch, therefore, merges both headers. Notable points:

- There are several functions in cacheflush.h which exist only on ppc32 or only on ppc64. These are handled by #ifdef for now, but they should probably be consolidated, along with the actual code behind them, later.

- Confusingly, both ppc32 and ppc64 have a flush_dcache_range(), but they're subtly different: it uses dcbf on ppc32 and dcbst on ppc64, and ppc64 has a flush_inval_dcache_range() which uses dcbf (see the illustrative sketch after this message). These too should be merged and consolidated later.

- flush_dcache_range() was defined in cacheflush.h on ppc64, and in cache.h on ppc32. In the merged version it lives in cacheflush.h.

- On ppc32, flush_icache_range() is a normal function in misc.S. On ppc64, it was a wrapper which tested a feature bit before calling __flush_icache_range(), which does the actual flush. This patch takes the ppc64 approach, which amounts to no change on ppc32, since CPU_FTR_COHERENT_ICACHE will never be set there, but it does mean renaming flush_icache_range() to __flush_icache_range() in arch/ppc/kernel/misc.S and arch/powerpc/kernel/misc_32.S.

- The PReP register info from asm-ppc/cache.h has moved to arch/ppc/platforms/prep_setup.c.

- The 8xx register info from asm-ppc/cache.h has moved to a new asm-powerpc/reg_8xx.h, included from reg.h.

- flush_dcache_all() was defined on ppc32 only, but was never called (although it was exported). This patch therefore removes it from cacheflush.h and from ARCH=powerpc (misc_32.S) entirely. It is left in ARCH=ppc for now, with the prototype moved to ppc_ksyms.c.

Built for Walnut (ARCH=ppc), 32-bit multiplatform (pmac, CHRP and PReP ARCH=ppc; pmac and CHRP ARCH=powerpc). Built and booted on POWER5 LPAR (ARCH=powerpc and ARCH=ppc64). Built for 32-bit powermac (ARCH=ppc and ARCH=powerpc). Built and booted on G5 (ARCH=powerpc).

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
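[Editor's note, not part of the patch] The sketch below illustrates the dcbst/dcbf distinction the second bullet describes: dcbst cleans a dirty data cache line (writes it back to memory, leaving it valid in the cache), while dcbf flushes it (writes it back and invalidates it). The function names and the 32-byte line size are assumptions for the example; they are not taken from the kernel source.

/*
 * Illustrative sketch only: a typical cache-line loop using dcbst
 * (clean: write back, keep valid) versus dcbf (flush: write back
 * and invalidate).  Assumes a 32-byte L1 cache line for simplicity.
 */
#define EXAMPLE_L1_CACHE_BYTES 32UL

static inline void example_clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long addr;

	for (addr = start & ~(EXAMPLE_L1_CACHE_BYTES - 1); addr < stop;
	     addr += EXAMPLE_L1_CACHE_BYTES)
		__asm__ __volatile__("dcbst 0,%0" : : "r" (addr) : "memory");
	__asm__ __volatile__("sync" : : : "memory");	/* wait for the write-backs */
}

static inline void example_flush_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long addr;

	for (addr = start & ~(EXAMPLE_L1_CACHE_BYTES - 1); addr < stop;
	     addr += EXAMPLE_L1_CACHE_BYTES)
		__asm__ __volatile__("dcbf 0,%0" : : "r" (addr) : "memory");
	__asm__ __volatile__("sync" : : : "memory");
}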
Diffstat (limited to 'include/asm-powerpc/cacheflush.h')
-rw-r--r--  include/asm-powerpc/cacheflush.h | 68
1 file changed, 68 insertions, 0 deletions
diff --git a/include/asm-powerpc/cacheflush.h b/include/asm-powerpc/cacheflush.h
new file mode 100644
index 00000000000..8a740c88d93
--- /dev/null
+++ b/include/asm-powerpc/cacheflush.h
@@ -0,0 +1,68 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_CACHEFLUSH_H
+#define _ASM_POWERPC_CACHEFLUSH_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h>
+#include <asm/cputable.h>
+
+/*
+ * No cache flushing is required when address mappings are changed,
+ * because the caches on PowerPCs are physically addressed.
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_icache_page(vma, page) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+extern void __flush_icache_range(unsigned long, unsigned long);
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+ __flush_icache_range(start, stop);
+}
+
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+ struct page *page, unsigned long addr,
+ int len);
+extern void __flush_dcache_icache(void *page_va);
+extern void flush_dcache_icache_page(struct page *page);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
+extern void __flush_dcache_icache_phys(unsigned long physaddr);
+#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
+
+extern void flush_dcache_range(unsigned long start, unsigned long stop);
+#ifdef CONFIG_PPC32
+extern void clean_dcache_range(unsigned long start, unsigned long stop);
+extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
+#endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC64
+extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
+extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
+#endif
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_user_range(vma, page, vaddr, len); \
+ } while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_CACHEFLUSH_H */
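
[Editor's note, not part of the patch] A typical caller of the merged flush_icache_range() is code that writes instructions into memory (a trampoline, patched kernel text, and the like) and must make them visible to instruction fetch. The hypothetical helper below sketches that pattern; on CPUs with CPU_FTR_COHERENT_ICACHE the inline wrapper above reduces it to a feature-bit test. The function name and buffer handling are assumptions for the example.

/* Hypothetical kernel-context usage sketch, not part of this patch. */
#include <linux/types.h>
#include <asm/cacheflush.h>

static void example_install_insns(u32 *dst, const u32 *src, unsigned int count)
{
	unsigned int i;

	/* Write the new instructions through the data cache. */
	for (i = 0; i < count; i++)
		dst[i] = src[i];

	/*
	 * Push the new instructions out of the d-cache and invalidate any
	 * stale i-cache lines before the code at dst is executed.
	 */
	flush_icache_range((unsigned long)dst, (unsigned long)(dst + count));
}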