static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
{
-    const void *end = p + size;
    size_t cacheline_mask = dcache_line_bytes - 1;
    dsb(sy); /* So the CPU issues all writes to the range */
    if ( (uintptr_t)p & cacheline_mask )
    {
+        size -= dcache_line_bytes - ((uintptr_t)p & cacheline_mask);
        p = (void *)((uintptr_t)p & ~cacheline_mask);
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
        p += dcache_line_bytes;
    }
-    if ( (uintptr_t)end & cacheline_mask )
-    {
-        end = (void *)((uintptr_t)end & ~cacheline_mask);
-        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (end));
-    }
-    for ( ; p < end; p += dcache_line_bytes )
+    for ( ; size >= dcache_line_bytes;
+            p += dcache_line_bytes, size -= dcache_line_bytes )
        asm volatile (__invalidate_dcache_one(0) : : "r" (p));
+    if ( size > 0 )
+        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+
    dsb(sy); /* So we know the flushes happen before continuing */
    return 0;
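Not part of the hunk above, just a user-space sketch of the new loop bounds: the rewritten function splits a range into a partially covered head line (clean & invalidate), fully covered middle lines (invalidate only), and a partially covered tail line (clean & invalidate). The 64-byte line size and the emit() helper are assumptions made for the walkthrough; the real code issues the maintenance ops through the asm wrappers and, like the hunk, expects size to reach at least to the end of the head line.

#include <stdint.h>
#include <stdio.h>

#define dcache_line_bytes 64UL        /* assumed line size for the walkthrough */

/* Hypothetical stand-in for the asm wrappers: record which op hits which line. */
static void emit(const char *op, uintptr_t line)
{
    printf("%-18s %#lx\n", op, (unsigned long)line);
}

static void invalidate_range(uintptr_t p, unsigned long size)
{
    size_t cacheline_mask = dcache_line_bytes - 1;

    if ( p & cacheline_mask )
    {
        /* Partially covered head line: clean & invalidate it, then skip it. */
        size -= dcache_line_bytes - (p & cacheline_mask);
        p &= ~cacheline_mask;
        emit("clean+invalidate", p);
        p += dcache_line_bytes;
    }

    /* Fully covered lines can simply be invalidated. */
    for ( ; size >= dcache_line_bytes;
          p += dcache_line_bytes, size -= dcache_line_bytes )
        emit("invalidate", p);

    /* Partially covered tail line, if any. */
    if ( size > 0 )
        emit("clean+invalidate", p);
}

int main(void)
{
    /* 0x1010..0x10d7: unaligned head, two full lines, unaligned tail. */
    invalidate_range(0x1010, 200);
    return 0;
}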
static inline int clean_dcache_va_range(const void *p, unsigned long size)
{
-    const void *end = p + size;
+    size_t cacheline_mask = dcache_line_bytes - 1;
+
    dsb(sy); /* So the CPU issues all writes to the range */
-    p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
-    for ( ; p < end; p += dcache_line_bytes )
+    size += (uintptr_t)p & cacheline_mask;
+    size = (size + cacheline_mask) & ~cacheline_mask;
+    p = (void *)((uintptr_t)p & ~cacheline_mask);
+    for ( ; size >= dcache_line_bytes;
+            p += dcache_line_bytes, size -= dcache_line_bytes )
        asm volatile (__clean_dcache_one(0) : : "r" (p));
    dsb(sy); /* So we know the flushes happen before continuing */
    /* ARM callers assume that dcache_* functions cannot fail. */
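This hunk and the clean & invalidate variant below replace the old end pointer with a single rounding step: fold the start misalignment into size, round size up to a whole number of lines, and align p down. A minimal check of that arithmetic, assuming a 64-byte line; round_range() is a made-up name used only for this illustration:

#include <assert.h>
#include <stdint.h>

#define dcache_line_bytes 64UL        /* assumed line size */

/* Hypothetical helper mirroring the rounding done in clean_dcache_va_range(). */
static void round_range(uintptr_t *p, unsigned long *size)
{
    size_t cacheline_mask = dcache_line_bytes - 1;

    *size += *p & cacheline_mask;                       /* cover the head bytes */
    *size = (*size + cacheline_mask) & ~cacheline_mask; /* whole lines only */
    *p &= ~cacheline_mask;                              /* align the start down */
}

int main(void)
{
    uintptr_t p = 0x1010;       /* 16 bytes into a line */
    unsigned long size = 200;   /* ends 24 bytes into the last line */

    round_range(&p, &size);
    assert(p == 0x1000 && size == 256);   /* 4 lines: 0x1000..0x10ff */
    return 0;
}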
static inline int clean_and_invalidate_dcache_va_range
    (const void *p, unsigned long size)
{
-    const void *end = p + size;
+    size_t cacheline_mask = dcache_line_bytes - 1;
+
    dsb(sy); /* So the CPU issues all writes to the range */
-    p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
-    for ( ; p < end; p += dcache_line_bytes )
+    size += (uintptr_t)p & cacheline_mask;
+    size = (size + cacheline_mask) & ~cacheline_mask;
+    p = (void *)((uintptr_t)p & ~cacheline_mask);
+    for ( ; size >= dcache_line_bytes;
+            p += dcache_line_bytes, size -= dcache_line_bytes )
        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
    dsb(sy); /* So we know the flushes happen before continuing */
    /* ARM callers assume that dcache_* functions cannot fail. */
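A side note, not taken from the patch itself: one hazard the size-based loops avoid is the old end = p + size bound, which wraps to 0 when the requested range ends exactly at the top of the address space, so a p < end loop would never execute. A tiny integer-only illustration (no cache ops, addresses picked arbitrarily):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A range covering the last two 64-byte lines of the address space. */
    uintptr_t p = UINTPTR_MAX - 127;
    unsigned long size = 128;

    uintptr_t end = p + size;              /* wraps around to 0 */
    printf("end = %#lx, p < end: %d\n",
           (unsigned long)end, p < end);   /* prints: end = 0, p < end: 0 */
    return 0;
}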