Diffstat (limited to 'drivers/block/zram/zram_drv.c')
-rw-r--r-- | drivers/block/zram/zram_drv.c | 330
1 file changed, 153 insertions(+), 177 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9f5020b077c5..fda7d8624889 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zram_index_mutex);
 static int zram_major;
 static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
 
+#define ZRAM_MAX_ALGO_NAME_SZ	128
+
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
 /*
@@ -58,19 +60,56 @@ static void zram_free_page(struct zram *zram, size_t index);
 static int zram_read_from_zspool(struct zram *zram, struct page *page,
				  u32 index);
 
-static int zram_slot_trylock(struct zram *zram, u32 index)
+#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
+
+static void zram_slot_lock_init(struct zram *zram, u32 index)
+{
+	static struct lock_class_key __key;
+
+	lockdep_init_map(slot_dep_map(zram, index), "zram->table[index].lock",
+			 &__key, 0);
+}
+
+/*
+ * entry locking rules:
+ *
+ * 1) Lock is exclusive
+ *
+ * 2) lock() function can sleep waiting for the lock
+ *
+ * 3) Lock owner can sleep
+ *
+ * 4) Use TRY lock variant when in atomic context
+ *    - must check return value and handle locking failures
+ */
+static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
 {
-	return spin_trylock(&zram->table[index].lock);
+	unsigned long *lock = &zram->table[index].flags;
+
+	if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
+		mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
+		lock_acquired(slot_dep_map(zram, index), _RET_IP_);
+		return true;
+	}
+
+	return false;
 }
 
 static void zram_slot_lock(struct zram *zram, u32 index)
 {
-	spin_lock(&zram->table[index].lock);
+	unsigned long *lock = &zram->table[index].flags;
+
+	mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
+	wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
+	lock_acquired(slot_dep_map(zram, index), _RET_IP_);
 }
 
 static void zram_slot_unlock(struct zram *zram, u32 index)
 {
-	spin_unlock(&zram->table[index].lock);
+	unsigned long *lock = &zram->table[index].flags;
+
+	mutex_release(slot_dep_map(zram, index), _RET_IP_);
+	clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
 }
 
 static inline bool init_done(struct zram *zram)
@@ -93,7 +132,6 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
 	zram->table[index].handle = handle;
 }
 
-/* flag operations require table entry bit_spin_lock() being held */
 static bool zram_test_flag(struct zram *zram, u32 index,
 			enum zram_pageflags flag)
 {
@@ -257,15 +295,24 @@ static void release_pp_ctl(struct zram *zram, struct zram_pp_ctl *ctl)
 	kfree(ctl);
 }
 
-static void place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
-			  struct zram_pp_slot *pps)
+static bool place_pp_slot(struct zram *zram, struct zram_pp_ctl *ctl,
+			  u32 index)
 {
-	u32 idx;
+	struct zram_pp_slot *pps;
+	u32 bid;
+
+	pps = kmalloc(sizeof(*pps), GFP_NOIO | __GFP_NOWARN);
+	if (!pps)
+		return false;
+
+	INIT_LIST_HEAD(&pps->entry);
+	pps->index = index;
 
-	idx = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
-	list_add(&pps->entry, &ctl->pp_buckets[idx]);
+	bid = zram_get_obj_size(zram, pps->index) / PP_BUCKET_SIZE_RANGE;
+	list_add(&pps->entry, &ctl->pp_buckets[bid]);
 	zram_set_flag(zram, pps->index, ZRAM_PP_SLOT);
+	return true;
 }
 
 static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
@@ -699,15 +746,8 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode,
				    unsigned long index,
				    struct zram_pp_ctl *ctl)
 {
-	struct zram_pp_slot *pps = NULL;
-
 	for (; nr_pages != 0; index++, nr_pages--) {
-		if (!pps)
-			pps = kmalloc(sizeof(*pps), GFP_KERNEL);
-		if (!pps)
-			return -ENOMEM;
-
-		INIT_LIST_HEAD(&pps->entry);
+		bool ok = true;
 
 		zram_slot_lock(zram, index);
 		if (!zram_allocated(zram, index))
@@ -727,14 +767,13 @@ static int scan_slots_for_writeback(struct zram *zram, u32 mode,
 		    !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
 			goto next;
 
-		pps->index = index;
-		place_pp_slot(zram, ctl, pps);
-		pps = NULL;
+		ok = place_pp_slot(zram, ctl, index);
 next:
 		zram_slot_unlock(zram, index);
+		if (!ok)
+			break;
 	}
 
-	kfree(pps);
 	return 0;
 }
 
@@ -748,7 +787,7 @@ static ssize_t writeback_store(struct device *dev,
 	unsigned long index = 0;
 	struct bio bio;
 	struct bio_vec bio_vec;
-	struct page *page;
+	struct page *page = NULL;
 	ssize_t ret = len;
 	int mode, err;
 	unsigned long blk_idx = 0;
@@ -890,8 +929,10 @@ next:
 	if (blk_idx)
 		free_block_bdev(zram, blk_idx);
-	__free_page(page);
+
 release_init_lock:
+	if (page)
+		__free_page(page);
 	release_pp_ctl(zram, ctl);
 	atomic_set(&zram->pp_in_progress, 0);
 	up_read(&zram->init_lock);
@@ -1065,27 +1106,6 @@ static void zram_debugfs_register(struct zram *zram) {};
 static void zram_debugfs_unregister(struct zram *zram) {};
 #endif
 
-/*
- * We switched to per-cpu streams and this attr is not needed anymore.
- * However, we will keep it around for some time, because:
- * a) we may revert per-cpu streams in the future
- * b) it's visible to user space and we need to follow our 2 years
- *    retirement rule; but we already have a number of 'soon to be
- *    altered' attrs, so max_comp_streams need to wait for the next
- *    layoff cycle.
- */
-static ssize_t max_comp_streams_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
-}
-
-static ssize_t max_comp_streams_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t len)
-{
-	return len;
-}
-
 static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
 {
 	/* Do not free statically defined compression algorithms */
@@ -1112,7 +1132,7 @@ static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
 	size_t sz;
 
 	sz = strlen(buf);
-	if (sz >= CRYPTO_MAX_ALG_NAME)
+	if (sz >= ZRAM_MAX_ALGO_NAME_SZ)
 		return -E2BIG;
 
 	compressor = kstrdup(buf, GFP_KERNEL);
@@ -1420,9 +1440,8 @@ static ssize_t debug_stat_show(struct device *dev,
 
 	down_read(&zram->init_lock);
 	ret = scnprintf(buf, PAGE_SIZE,
-			"version: %d\n%8llu %8llu\n",
+			"version: %d\n0 %8llu\n",
 			version,
-			(u64)atomic64_read(&zram->stats.writestall),
 			(u64)atomic64_read(&zram->stats.miss_free));
 	up_read(&zram->init_lock);
@@ -1473,15 +1492,11 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
 		huge_class_size = zs_huge_class_size(zram->mem_pool);
 
 	for (index = 0; index < num_pages; index++)
-		spin_lock_init(&zram->table[index].lock);
+		zram_slot_lock_init(zram, index);
+
 	return true;
 }
 
-/*
- * To protect concurrent access to the same index entry,
- * caller should hold this table index entry's bit_spinlock to
- * indicate this index entry is accessing.
- */
 static void zram_free_page(struct zram *zram, size_t index)
 {
 	unsigned long handle;
@@ -1548,11 +1563,11 @@ static int read_incompressible_page(struct zram *zram, struct page *page,
 	void *src, *dst;
 
 	handle = zram_get_handle(zram, index);
-	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	src = zs_obj_read_begin(zram->mem_pool, handle, NULL);
 	dst = kmap_local_page(page);
 	copy_page(dst, src);
 	kunmap_local(dst);
-	zs_unmap_object(zram->mem_pool, handle);
+	zs_obj_read_end(zram->mem_pool, handle, src);
 
 	return 0;
 }
@@ -1570,12 +1585,12 @@ static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
 
 	prio = zram_get_priority(zram, index);
 	zstrm = zcomp_stream_get(zram->comps[prio]);
-	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy);
 	dst = kmap_local_page(page);
 	ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
 	kunmap_local(dst);
-	zs_unmap_object(zram->mem_pool, handle);
-	zcomp_stream_put(zram->comps[prio]);
+	zs_obj_read_end(zram->mem_pool, handle, src);
+	zcomp_stream_put(zstrm);
 
 	return ret;
 }
@@ -1670,7 +1685,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
				     u32 index)
 {
 	unsigned long handle;
-	void *src, *dst;
+	void *src;
 
 	/*
 	 * This function is called from preemptible context so we don't need
@@ -1678,7 +1693,8 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
 	 * like we do for compressible pages.
 	 */
 	handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
-			   GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+			   GFP_NOIO | __GFP_NOWARN |
+			   __GFP_HIGHMEM | __GFP_MOVABLE);
 	if (IS_ERR_VALUE(handle))
 		return PTR_ERR((void *)handle);
 
@@ -1687,11 +1703,9 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
 		return -ENOMEM;
 	}
 
-	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 	src = kmap_local_page(page);
-	memcpy(dst, src, PAGE_SIZE);
+	zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE);
 	kunmap_local(src);
-	zs_unmap_object(zram->mem_pool, handle);
 
 	zram_slot_lock(zram, index);
 	zram_set_flag(zram, index, ZRAM_HUGE);
@@ -1710,11 +1724,11 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 {
 	int ret = 0;
-	unsigned long handle = -ENOMEM;
-	unsigned int comp_len = 0;
-	void *dst, *mem;
+	unsigned long handle;
+	unsigned int comp_len;
+	void *mem;
 	struct zcomp_strm *zstrm;
-	unsigned long element = 0;
+	unsigned long element;
 	bool same_filled;
 
 	/* First, free memory allocated to this slot (if any) */
@@ -1728,7 +1742,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	if (same_filled)
 		return write_same_filled_page(zram, element, index);
 
-compress_again:
 	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
 	mem = kmap_local_page(page);
 	ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
@@ -1736,59 +1749,32 @@ compress_again:
 	kunmap_local(mem);
 
 	if (unlikely(ret)) {
-		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+		zcomp_stream_put(zstrm);
err=%d\n", ret); - zs_free(zram->mem_pool, handle); return ret; } if (comp_len >= huge_class_size) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); return write_incompressible_page(zram, page, index); } - /* - * handle allocation has 2 paths: - * a) fast path is executed with preemption disabled (for - * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear, - * since we can't sleep; - * b) slow path enables preemption and attempts to allocate - * the page with __GFP_DIRECT_RECLAIM bit set. we have to - * put per-cpu compression stream and, thus, to re-do - * the compression once handle is allocated. - * - * if we have a 'non-null' handle here then we are coming - * from the slow path and handle has already been allocated. - */ - if (IS_ERR_VALUE(handle)) - handle = zs_malloc(zram->mem_pool, comp_len, - __GFP_KSWAPD_RECLAIM | - __GFP_NOWARN | - __GFP_HIGHMEM | - __GFP_MOVABLE); + handle = zs_malloc(zram->mem_pool, comp_len, + GFP_NOIO | __GFP_NOWARN | + __GFP_HIGHMEM | __GFP_MOVABLE); if (IS_ERR_VALUE(handle)) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); - atomic64_inc(&zram->stats.writestall); - handle = zs_malloc(zram->mem_pool, comp_len, - GFP_NOIO | __GFP_HIGHMEM | - __GFP_MOVABLE); - if (IS_ERR_VALUE(handle)) - return PTR_ERR((void *)handle); - - goto compress_again; + zcomp_stream_put(zstrm); + return PTR_ERR((void *)handle); } if (!zram_can_store_page(zram)) { - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zcomp_stream_put(zstrm); zs_free(zram->mem_pool, handle); return -ENOMEM; } - dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); - - memcpy(dst, zstrm->buffer, comp_len); - zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); - zs_unmap_object(zram->mem_pool, handle); + zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len); + zcomp_stream_put(zstrm); zram_slot_lock(zram, index); zram_set_handle(zram, index, handle); @@ -1835,20 +1821,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, #define RECOMPRESS_IDLE (1 << 0) #define RECOMPRESS_HUGE (1 << 1) -static int scan_slots_for_recompress(struct zram *zram, u32 mode, +static int scan_slots_for_recompress(struct zram *zram, u32 mode, u32 prio_max, struct zram_pp_ctl *ctl) { unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; - struct zram_pp_slot *pps = NULL; unsigned long index; for (index = 0; index < nr_pages; index++) { - if (!pps) - pps = kmalloc(sizeof(*pps), GFP_KERNEL); - if (!pps) - return -ENOMEM; - - INIT_LIST_HEAD(&pps->entry); + bool ok = true; zram_slot_lock(zram, index); if (!zram_allocated(zram, index)) @@ -1867,14 +1847,17 @@ static int scan_slots_for_recompress(struct zram *zram, u32 mode, zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE)) goto next; - pps->index = index; - place_pp_slot(zram, ctl, pps); - pps = NULL; + /* Already compressed with same of higher priority */ + if (zram_get_priority(zram, index) + 1 >= prio_max) + goto next; + + ok = place_pp_slot(zram, ctl, index); next: zram_slot_unlock(zram, index); + if (!ok) + break; } - kfree(pps); return 0; } @@ -1896,9 +1879,8 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, unsigned int comp_len_new; unsigned int class_index_old; unsigned int class_index_new; - u32 num_recomps = 0; - void *src, *dst; - int ret; + void *src; + int ret = 0; handle_old = zram_get_handle(zram, index); if (!handle_old) @@ -1923,6 +1905,16 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page, zram_clear_flag(zram, index, ZRAM_IDLE); 
 
 	class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
+
+	prio = max(prio, zram_get_priority(zram, index) + 1);
+	/*
+	 * Recompression slots scan should not select slots that are
+	 * already compressed with a higher priority algorithm, but
+	 * just in case
+	 */
+	if (prio >= prio_max)
+		return 0;
+
 	/*
 	 * Iterate the secondary comp algorithms list (in order of priority)
 	 * and try to recompress the page.
@@ -1931,14 +1923,6 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
 		if (!zram->comps[prio])
 			continue;
 
-		/*
-		 * Skip if the object is already re-compressed with a higher
-		 * priority algorithm (or same algorithm).
-		 */
-		if (prio <= zram_get_priority(zram, index))
-			continue;
-
-		num_recomps++;
 		zstrm = zcomp_stream_get(zram->comps[prio]);
 		src = kmap_local_page(page);
 		ret = zcomp_compress(zram->comps[prio], zstrm,
				     src, &comp_len_new);
 		kunmap_local(src);
 
 		if (ret) {
-			zcomp_stream_put(zram->comps[prio]);
-			return ret;
+			zcomp_stream_put(zstrm);
+			zstrm = NULL;
+			break;
 		}
 
 		class_index_new = zs_lookup_class_index(zram->mem_pool,
@@ -1956,7 +1941,8 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
 		/* Continue until we make progress */
 		if (class_index_new >= class_index_old ||
 		    (threshold && comp_len_new >= threshold)) {
-			zcomp_stream_put(zram->comps[prio]);
+			zcomp_stream_put(zstrm);
+			zstrm = NULL;
 			continue;
 		}
 
@@ -1965,14 +1951,6 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
 	}
 
 	/*
-	 * We did not try to recompress, e.g. when we have only one
-	 * secondary algorithm and the page is already recompressed
-	 * using that algorithm
-	 */
-	if (!zstrm)
-		return 0;
-
-	/*
 	 * Decrement the limit (if set) on pages we can recompress, even
 	 * when current recompression was unsuccessful or did not compress
 	 * the page below the threshold, because we still spent resources
@@ -1981,48 +1959,39 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
 	if (*num_recomp_pages)
 		*num_recomp_pages -= 1;
 
-	if (class_index_new >= class_index_old) {
+	/* Compression error */
+	if (ret)
+		return ret;
+
+	if (!zstrm) {
 		/*
 		 * Secondary algorithms failed to re-compress the page
-		 * in a way that would save memory, mark the object as
-		 * incompressible so that we will not try to compress
-		 * it again.
+		 * in a way that would save memory.
 		 *
-		 * We need to make sure that all secondary algorithms have
-		 * failed, so we test if the number of recompressions matches
-		 * the number of active secondary algorithms.
+		 * Mark the object incompressible if the max-priority
+		 * algorithm couldn't re-compress it.
 		 */
-		if (num_recomps == zram->num_active_comps - 1)
-			zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
+		if (prio < zram->num_active_comps)
+			return 0;
+		zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
 		return 0;
 	}
 
-	/* Successful recompression but above threshold */
-	if (threshold && comp_len_new >= threshold)
-		return 0;
-
 	/*
-	 * No direct reclaim (slow path) for handle allocation and no
-	 * re-compression attempt (unlike in zram_write_bvec()) since
-	 * we already have stored that object in zsmalloc. If we cannot
-	 * alloc memory for recompressed object then we bail out and
-	 * simply keep the old (existing) object in zsmalloc.
+	 * We are holding per-CPU stream mutex and entry lock so better
+	 * avoid direct reclaim. Allocation error is not fatal since
+	 * we still have the old object in the mem_pool.
 	 */
 	handle_new = zs_malloc(zram->mem_pool, comp_len_new,
-			       __GFP_KSWAPD_RECLAIM |
-			       __GFP_NOWARN |
-			       __GFP_HIGHMEM |
-			       __GFP_MOVABLE);
+			       GFP_NOIO | __GFP_NOWARN |
+			       __GFP_HIGHMEM | __GFP_MOVABLE);
 	if (IS_ERR_VALUE(handle_new)) {
-		zcomp_stream_put(zram->comps[prio]);
+		zcomp_stream_put(zstrm);
 		return PTR_ERR((void *)handle_new);
 	}
 
-	dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
-	memcpy(dst, zstrm->buffer, comp_len_new);
-	zcomp_stream_put(zram->comps[prio]);
-
-	zs_unmap_object(zram->mem_pool, handle_new);
+	zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new);
+	zcomp_stream_put(zstrm);
 
 	zram_free_page(zram, index);
 	zram_set_handle(zram, index, handle_new);
@@ -2039,16 +2008,19 @@ static ssize_t recompress_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
 {
-	u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS;
 	struct zram *zram = dev_to_zram(dev);
 	char *args, *param, *val, *algo = NULL;
 	u64 num_recomp_pages = ULLONG_MAX;
 	struct zram_pp_ctl *ctl = NULL;
 	struct zram_pp_slot *pps;
 	u32 mode = 0, threshold = 0;
-	struct page *page;
+	u32 prio, prio_max;
+	struct page *page = NULL;
 	ssize_t ret;
 
+	prio = ZRAM_SECONDARY_COMP;
+	prio_max = zram->num_active_comps;
+
 	args = skip_spaces(buf);
 	while (*args) {
 		args = next_arg(args, &param, &val);
@@ -2101,7 +2073,7 @@ static ssize_t recompress_store(struct device *dev,
 			if (prio == ZRAM_PRIMARY_COMP)
 				prio = ZRAM_SECONDARY_COMP;
 
-			prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+			prio_max = prio + 1;
 			continue;
 		}
 	}
@@ -2129,7 +2101,7 @@ static ssize_t recompress_store(struct device *dev,
				continue;
 
 			if (!strcmp(zram->comp_algs[prio], algo)) {
-				prio_max = min(prio + 1, ZRAM_MAX_COMPS);
+				prio_max = prio + 1;
 				found = true;
 				break;
 			}
@@ -2141,6 +2113,12 @@ static ssize_t recompress_store(struct device *dev,
 		}
 	}
 
+	prio_max = min(prio_max, (u32)zram->num_active_comps);
+	if (prio >= prio_max) {
+		ret = -EINVAL;
+		goto release_init_lock;
+	}
+
 	page = alloc_page(GFP_KERNEL);
 	if (!page) {
 		ret = -ENOMEM;
@@ -2153,7 +2131,7 @@ static ssize_t recompress_store(struct device *dev,
 		goto release_init_lock;
 	}
 
-	scan_slots_for_recompress(zram, mode, ctl);
+	scan_slots_for_recompress(zram, mode, prio_max, ctl);
 
 	ret = len;
 	while ((pps = select_pp_slot(ctl))) {
@@ -2181,9 +2159,9 @@ next:
 		cond_resched();
 	}
 
-	__free_page(page);
-
release_init_lock:
+	if (page)
+		__free_page(page);
 	release_pp_ctl(zram, ctl);
 	atomic_set(&zram->pp_in_progress, 0);
 	up_read(&zram->init_lock);
@@ -2506,7 +2484,6 @@ static DEVICE_ATTR_WO(reset);
 static DEVICE_ATTR_WO(mem_limit);
 static DEVICE_ATTR_WO(mem_used_max);
 static DEVICE_ATTR_WO(idle);
-static DEVICE_ATTR_RW(max_comp_streams);
 static DEVICE_ATTR_RW(comp_algorithm);
 #ifdef CONFIG_ZRAM_WRITEBACK
 static DEVICE_ATTR_RW(backing_dev);
@@ -2528,7 +2505,6 @@ static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_mem_limit.attr,
 	&dev_attr_mem_used_max.attr,
 	&dev_attr_idle.attr,
-	&dev_attr_max_comp_streams.attr,
 	&dev_attr_comp_algorithm.attr,
 #ifdef CONFIG_ZRAM_WRITEBACK
 	&dev_attr_backing_dev.attr,
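
Notes

The heart of this change is the slot-locking scheme: the per-entry spinlock is replaced by an exclusive, sleepable bit lock kept in the entry's flags word, with hand-rolled lockdep annotations (lockdep_init_map() plus mutex_acquire()/mutex_release()) because there is no struct mutex to carry a dep_map. A minimal sketch of the same pattern, with hypothetical names and the lockdep hooks omitted:

	#include <linux/bitops.h>
	#include <linux/sched.h>
	#include <linux/wait_bit.h>

	#define ENTRY_LOCK	0	/* hypothetical stand-in for ZRAM_ENTRY_LOCK */

	static __must_check bool entry_trylock(unsigned long *flags)
	{
		/* test_and_set_bit_lock() gives acquire ordering on success */
		return !test_and_set_bit_lock(ENTRY_LOCK, flags);
	}

	static void entry_lock(unsigned long *flags)
	{
		/* may sleep: waits until the bit is clear, then sets it */
		wait_on_bit_lock(flags, ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
	}

	static void entry_unlock(unsigned long *flags)
	{
		/* release ordering, then wake wait_on_bit_lock() sleepers */
		clear_and_wake_up_bit(ENTRY_LOCK, flags);
	}

Because the lock owner may now sleep, the write paths can allocate with GFP_NOIO directly, which is what lets the patch delete the compress_again slow path and the writestall counter; atomic contexts must use the trylock variant and handle failure, hence the new __must_check on zram_slot_trylock().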
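
The patch also switches from the zs_map_object()/zs_unmap_object() pair to zsmalloc's object read/write API. zs_obj_read_begin() takes a caller-supplied buffer (here the per-stream local_copy) that zsmalloc can fall back to when an object spans two physical pages; the returned pointer is either a direct mapping or that buffer, and must be handed back to zs_obj_read_end(). An illustrative sketch of the read shape (helper name and error handling are mine, not the patch's):

	static void read_object(struct zs_pool *pool, unsigned long handle,
				struct zcomp_strm *zstrm, void *dst, size_t size)
	{
		void *src;

		/* src is a direct mapping or zstrm->local_copy */
		src = zs_obj_read_begin(pool, handle, zstrm->local_copy);
		memcpy(dst, src, size);
		zs_obj_read_end(pool, handle, src);
	}

read_incompressible_page() can pass NULL instead of a buffer, presumably because a ZRAM_HUGE object fills a whole page and never crosses a page boundary; the write side collapses map/memcpy/unmap into a single zs_obj_write() call.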
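
Finally, the recompression rework moves priority filtering up front instead of testing inside the loop: scan_slots_for_recompress() now takes prio_max and skips slots already stored at the highest eligible priority, while recompress_slot() clamps the starting priority before iterating. A worked example with hypothetical values, assuming three active algorithms at priorities 0..2 (num_active_comps == 3) and a slot previously recompressed at priority 1:

	/* recompress_store(): no "priority"/"algo" argument given */
	prio = ZRAM_SECONDARY_COMP;		/* 1 */
	prio_max = zram->num_active_comps;	/* 3 */

	/* recompress_slot(): the slot already holds a priority-1 object */
	prio = max(prio, zram_get_priority(zram, index) + 1);	/* 2 */
	if (prio >= prio_max)	/* 2 < 3: not taken */
		return 0;
	/* the loop then attempts only the priority-2 algorithm */

A slot already compressed at priority 2 satisfies zram_get_priority() + 1 >= prio_max and is filtered out during the scan, so the in-function check is, as the new comment says, just a safety net.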