path: root/drivers/gpu/drm/drm_cache.c
author    Ingo Molnar <mingo@kernel.org>    2019-12-10 10:11:00 +0100
committer Ingo Molnar <mingo@kernel.org>    2019-12-10 10:11:00 +0100
commit 2040cf9f59037aa8aec749363e69ead165b67b43 (patch)
tree   e9c15448e841cc493bc80b9f658d7955623e86dd /drivers/gpu/drm/drm_cache.c
parent f66c0447cca1281116224d474cdb37d6a18e4b5b (diff)
parent e42617b825f8073569da76dc4510bfa019b1c35a (diff)
Merge tag 'v5.5-rc1' into core/kprobes, to resolve conflicts
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/gpu/drm/drm_cache.c')
-rw-r--r--  drivers/gpu/drm/drm_cache.c  14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 3bd76e918b5d2..03e01b000f7ac 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -62,10 +62,10 @@ static void drm_cache_flush_clflush(struct page *pages[],
 {
 	unsigned long i;
 
-	mb();
+	mb(); /*Full memory barrier used before so that CLFLUSH is ordered*/
 	for (i = 0; i < num_pages; i++)
 		drm_clflush_page(*pages++);
-	mb();
+	mb(); /*Also used after CLFLUSH so that all cache is flushed*/
 }
 
 #endif
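
The barrier/flush/barrier pattern these comments document can be sketched in user space with compiler intrinsics. This is a minimal analogue, not the kernel code: _mm_clflush() and _mm_mfence() from <immintrin.h> stand in for drm_clflush_page() and mb(), and the 64-byte cache-line size is an assumption:

    #include <immintrin.h>
    #include <stddef.h>

    /* Flush every cache line backing buf[0..len). CLFLUSH is only
     * guaranteed ordered by a full fence, so fence before the loop
     * (prior stores reach the lines being flushed) and after it
     * (later accesses observe the flushes as complete). */
    static void flush_range(const void *buf, size_t len)
    {
            const char *p = buf;
            const char *end = p + len;

            _mm_mfence();
            for (; p < end; p += 64)        /* assumed 64-byte lines */
                    _mm_clflush(p);
            _mm_mfence();
    }
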
@@ -92,6 +92,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 #elif defined(__powerpc__)
 	unsigned long i;
+
 	for (i = 0; i < num_pages; i++) {
 		struct page *page = pages[i];
 		void *page_virtual;
 
@@ -125,10 +126,10 @@ drm_clflush_sg(struct sg_table *st)
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		struct sg_page_iter sg_iter;
 
-		mb();
+		mb(); /*CLFLUSH is ordered only by using memory barriers*/
 		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
 			drm_clflush_page(sg_page_iter_page(&sg_iter));
-		mb();
+		mb(); /*Make sure that all cache line entries are flushed*/
 
 		return;
 	}
@@ -157,12 +158,13 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		const int size = boot_cpu_data.x86_clflush_size;
 		void *end = addr + length;
+
 		addr = (void *)(((unsigned long)addr) & -size);
-		mb();
+		mb(); /*CLFLUSH is only ordered with a full memory barrier*/
 		for (; addr < end; addr += size)
 			clflushopt(addr);
 		clflushopt(end - 1); /* force serialisation */
-		mb();
+		mb(); /*Ensure that every data cache line entry is flushed*/
 
 		return;
 	}
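
In this last hunk, "addr & -size" rounds the start address down to a cache-line boundary (for a power-of-two size, -size is the mask ~(size - 1)), and clflushopt(end - 1) re-flushes the final line to force the weakly ordered CLFLUSHOPTs to serialise, per its in-line comment. A small stand-alone sketch of the alignment arithmetic, assuming a 64-byte line:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const int size = 64;            /* assumed x86 cache-line size */
            uintptr_t addr = 0x1234;        /* example unaligned start */

            /* -size == ~(size - 1) for power-of-two sizes, so the AND
             * clears the low bits: 0x1234 & ~0x3f == 0x1200. */
            uintptr_t aligned = addr & (uintptr_t)-size;

            printf("0x%jx -> 0x%jx\n", (uintmax_t)addr, (uintmax_t)aligned);
            return 0;
    }
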