author    | Richard Weinberger <richard@nod.at> | 2019-07-06 22:51:56 +0200
committer | Richard Weinberger <richard@nod.at> | 2019-07-06 22:51:56 +0200
commit    | 1d2af80d581d1bae81594e497cd57e345235b940 (patch)
tree      | e1f8bb3d3d1436fe62057becb3eb1233f843f3ba /kernel/bpf/hashtab.c
parent    | b07079f1642c28dac4f6f339d5aca66203519734 (diff)
parent    | bce9437a0a48dd5e19490f56e1cdc39a9be5563c (diff)
Merge tag 'nand/for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux into mtd/next
NAND core changes:
- use longest matching pattern in ->exec_op() default parser
- export NAND operation tracer
- add flag to indicate panic_write in MTD
- use kzalloc() instead of kmalloc() and memset()
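
The kzalloc() item above is the usual allocation cleanup: a kmalloc() followed by a memset() collapses into a single call that returns already-zeroed memory. The sketch below only illustrates that pattern; `struct nand_foo` and `nand_foo_alloc()` are invented names, not the actual NAND core code.

```c
#include <linux/slab.h>
#include <linux/types.h>

/* Invented structure, for illustration only. */
struct nand_foo {
	int id;
	u8 oob[64];
};

static struct nand_foo *nand_foo_alloc(void)
{
	/* Before: foo = kmalloc(sizeof(*foo), GFP_KERNEL);
	 *         if (foo)
	 *                 memset(foo, 0, sizeof(*foo));
	 * After: allocate and zero in one step.
	 */
	return kzalloc(sizeof(struct nand_foo), GFP_KERNEL);
}
```
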
Raw NAND controller drivers changes:
- brcmnand:
* fix BCH ECC layout for large page NAND parts
* fallback to detected ecc-strength, ecc-step-size
* when oops in progress use pio and interrupt polling
* refactor code to introduce helper functions
* add support for v7.3 controller
- FSMC:
* use nand_op_trace for operation tracing
- GPMI:
* move all driver code into single file
* various cleanups (including dmaengine changes)
* use runtime PM to manage clocks (see the sketch after this list)
* implement exec_op
- MTK:
* correct low level time calculation of r/w cycle
* improve data sampling timing for read cycle
* add validity check for CE# pin setting
* fix wrongly assigned OOB buffer pointer issue
* re-license MTK NAND driver as Dual MIT/GPL
- STM32:
* manage the get_irq error case
* increase DMA completion timeouts
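
For the GPMI runtime-PM point above, the usual shape of such a conversion is a pair of runtime-suspend/resume callbacks that gate the controller clock, with the data path holding a runtime-PM reference around each access. The sketch below is generic and hedged: `struct my_nand_ctrl`, `my_nand_access()` and friends are invented names, not the GPMI driver code.

```c
#include <linux/clk.h>
#include <linux/pm_runtime.h>

/* Invented driver state, for illustration only. */
struct my_nand_ctrl {
	struct device *dev;
	struct clk *clk;
};

static int my_nand_runtime_suspend(struct device *dev)
{
	struct my_nand_ctrl *ctrl = dev_get_drvdata(dev);

	clk_disable_unprepare(ctrl->clk);	/* gate the clock while idle */
	return 0;
}

static int my_nand_runtime_resume(struct device *dev)
{
	struct my_nand_ctrl *ctrl = dev_get_drvdata(dev);

	return clk_prepare_enable(ctrl->clk);	/* ungate before any access */
}

static int my_nand_access(struct my_nand_ctrl *ctrl)
{
	int ret;

	/* Hold a runtime-PM reference so the clock is guaranteed to be on. */
	ret = pm_runtime_get_sync(ctrl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(ctrl->dev);
		return ret;
	}

	/* ... talk to the controller here ... */

	pm_runtime_mark_last_busy(ctrl->dev);
	pm_runtime_put_autosuspend(ctrl->dev);
	return 0;
}
```
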
Raw NAND chips drivers changes:
- Macronix: add read-retry support
Onenand driver changes:
- add support for 8Gb datasize chips
- avoid fall-through warnings
SPI-NAND changes:
- define macros for page-read ops with three-byte addresses
- add support for two-byte device IDs and then for GigaDevice GD5F1GQ4UFxxG
- add initial support for Paragon PN26G0xA
- handle the case where the last page read has bitflips
Diffstat (limited to 'kernel/bpf/hashtab.c')
-rw-r--r-- | kernel/bpf/hashtab.c | 23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 192d32e77db3..0f2708fde5f7 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -527,18 +527,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 	return insn - insn_buf;
 }
 
-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
+							 void *key, const bool mark)
 {
 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 
 	if (l) {
-		bpf_lru_node_set_ref(&l->lru_node);
+		if (mark)
+			bpf_lru_node_set_ref(&l->lru_node);
 		return l->key + round_up(map->key_size, 8);
 	}
 
 	return NULL;
 }
 
+static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, true);
+}
+
+static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, false);
+}
+
 static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
 				   struct bpf_insn *insn_buf)
 {
@@ -1250,6 +1262,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
 	.map_lookup_elem = htab_lru_map_lookup_elem,
+	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
 	.map_update_elem = htab_lru_map_update_elem,
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_gen_lookup = htab_lru_map_gen_lookup,
@@ -1281,7 +1294,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
 
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 {
-	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l;
 	void __percpu *pptr;
 	int ret = -ENOENT;
@@ -1297,8 +1309,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	l = __htab_map_lookup_elem(map, key);
 	if (!l)
 		goto out;
-	if (htab_is_lru(htab))
-		bpf_lru_node_set_ref(&l->lru_node);
+	/* We do not mark LRU map element here in order to not mess up
+	 * eviction heuristics when user space does a map walk.
+	 */
 	pptr = htab_elem_get_ptr(l, map->key_size);
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off,
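
The hunk above uses a common kernel pattern: one `__always_inline` helper takes a `const bool` flag, and two thin wrappers pass compile-time constants so the branch on the flag is folded away after inlining and neither caller pays for it. Below is a minimal, self-contained userspace sketch of that shape with made-up names (`lookup_common()`, `mark_count`); it illustrates the structure of the change, not the actual hashtab code, and uses plain `inline` where the kernel uses `__always_inline`.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the LRU bookkeeping the real code does via
 * bpf_lru_node_set_ref(); here it is just a counter. */
static unsigned long mark_count;

/* One shared helper; 'mark' is a compile-time constant at every
 * call site, so the compiler can drop the branch after inlining. */
static inline int *lookup_common(int *table, size_t idx, const bool mark)
{
	if (!table)
		return NULL;
	if (mark)
		mark_count++;	/* program-side lookup: touch LRU state */
	return &table[idx];
}

/* Wrapper for the fast path (BPF-program-side lookup in the real code). */
static int *lookup_elem(int *table, size_t idx)
{
	return lookup_common(table, idx, true);
}

/* Wrapper for syscall-side walks: no LRU side effects. */
static int *lookup_elem_sys(int *table, size_t idx)
{
	return lookup_common(table, idx, false);
}

int main(void)
{
	int table[4] = { 1, 2, 3, 4 };

	lookup_elem(table, 1);		/* bumps mark_count */
	lookup_elem_sys(table, 2);	/* leaves mark_count untouched */
	printf("marks: %lu\n", mark_count);
	return 0;
}
```

In the real change, the non-marking wrapper is wired up through the new `.map_lookup_elem_sys_only` callback in `htab_lru_map_ops`, so that user-space map walks do not skew the LRU eviction heuristics, as the added comment in `bpf_percpu_hash_copy()` spells out.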