Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4b09711b6f0fe..33925488040fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -245,6 +245,11 @@ EXPORT_SYMBOL(init_on_free);
 /* perform sanity checks on struct pages being allocated or freed */
 static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
+static inline bool is_check_pages_enabled(void)
+{
+	return static_branch_unlikely(&check_pages_enabled);
+}
+
 static bool _init_on_alloc_enabled_early __read_mostly
 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
@@ -1450,7 +1455,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		for (i = 1; i < (1 << order); i++) {
 			if (compound)
 				bad += free_tail_pages_check(page, page + i);
-			if (static_branch_unlikely(&check_pages_enabled)) {
+			if (is_check_pages_enabled()) {
 				if (unlikely(free_page_is_bad(page + i))) {
 					bad++;
 					continue;
@@ -1463,7 +1468,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		page->mapping = NULL;
 	if (memcg_kmem_online() && PageMemcgKmem(page))
 		__memcg_kmem_uncharge_page(page, order);
-	if (static_branch_unlikely(&check_pages_enabled)) {
+	if (is_check_pages_enabled()) {
 		if (free_page_is_bad(page))
 			bad++;
 		if (bad)
@@ -2373,7 +2378,7 @@ static int check_new_page(struct page *page)
 
 static inline bool check_new_pages(struct page *page, unsigned int order)
 {
-	if (static_branch_unlikely(&check_pages_enabled)) {
+	if (is_check_pages_enabled()) {
 		for (int i = 0; i < (1 << order); i++) {
 			struct page *p = page + i;
 
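
Not part of the commit above: a minimal, hypothetical out-of-tree module sketch of the pattern the patch relies on, for readers new to static keys. The demo_checks_enabled key and every demo_* name are invented for illustration; the real key is the DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled) wrapped by is_check_pages_enabled() above.

/* Hypothetical demo module, not from this commit: wrap a static key
 * in an inline helper, mirroring is_check_pages_enabled(). */
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Key defaults to false: each call site is a no-op until enabled. */
static DEFINE_STATIC_KEY_FALSE(demo_checks_enabled);

/* Same shape as is_check_pages_enabled(): one short helper instead of
 * repeating static_branch_unlikely(&demo_checks_enabled) everywhere. */
static inline bool demo_checks_on(void)
{
	return static_branch_unlikely(&demo_checks_enabled);
}

static int __init demo_init(void)
{
	/* Enabling the key live-patches the jump label at call sites. */
	static_branch_enable(&demo_checks_enabled);

	if (demo_checks_on())
		pr_info("demo checks enabled\n");
	return 0;
}

static void __exit demo_exit(void)
{
	static_branch_disable(&demo_checks_enabled);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the disabled branch is a patched NOP rather than a load and compare, a helper like is_check_pages_enabled() keeps the sanity checks in the hot page alloc/free paths essentially free when they are switched off.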