path: root/mm
author		Christoph Lameter <clameter@sgi.com>	2006-06-30 01:55:44 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-30 11:25:36 -0700
commit		ca889e6c45e0b112cb2ca9d35afc66297519b5d5 (patch)
tree		0a5efdec2a61540204d34bcbf56dc691d8f9c391 /mm
parent		bab1846a0582f627f5ec22aa2dc5f4f3e82e8176 (diff)
[PATCH] Use Zoned VM Counters for NUMA statistics
The numa statistics are really event counters.  But they are per node and so we have had special treatment for these counters through additional fields on the pcp structure.  We can now use the per zone nature of the zoned VM counters to realize these.

This will shrink the size of the pcp structure on NUMA systems.  We will have some room to add additional per zone counters that will all still fit in the same cacheline.

 Bits	Prior pcp size		Size after patch	We can add
 ------------------------------------------------------------------
 64	128 bytes (16 words)	80 bytes (10 words)	48
 32	 76 bytes (19 words)	56 bytes (14 words)	8 (64 byte cacheline)
 							72 (128 byte)

Remove the special statistics for numa and replace them with zoned vm counters.  This has the side effect that global sums of these events now show up in /proc/vmstat.

Also take the opportunity to move the zone_statistics() function from page_alloc.c into vmstat.c.

Discussions:
V2 http://marc.theaimsgroup.com/?t=115048227000002&r=1&w=2

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
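For reference: the NUMA_* counters used in the hunks below are new zone_stat_item entries. Their definitions live in include/linux/mmzone.h and therefore fall outside the mm/-only diffstat shown here; the following is only a rough sketch of what the patch assumes, reconstructed from the names used in the diff (ordering and comments are approximate):

	/* sketch, not part of this diff -- the real definitions are in include/linux/mmzone.h */
	enum zone_stat_item {
		/* ... existing items (nr_unstable, nr_bounce, ...) ... */
	#ifdef CONFIG_NUMA
		NUMA_HIT,		/* allocated in intended node */
		NUMA_MISS,		/* allocated in a non-intended node */
		NUMA_FOREIGN,		/* was intended here, hit elsewhere */
		NUMA_INTERLEAVE_HIT,	/* interleaving preferred this zone and got it */
		NUMA_LOCAL,		/* allocation from local node */
		NUMA_OTHER,		/* allocation from other node */
	#endif
		NR_VM_ZONE_STAT_ITEMS
	};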
Diffstat (limited to 'mm')
-rw-r--r--  mm/mempolicy.c   |  6
-rw-r--r--  mm/page_alloc.c  | 23
-rw-r--r--  mm/vmstat.c      | 73
3 files changed, 50 insertions, 52 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 6b9740bbf4c..e07e27e846a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1209,10 +1209,8 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
page = __alloc_pages(gfp, order, zl);
- if (page && page_zone(page) == zl->zones[0]) {
- zone_pcp(zl->zones[0],get_cpu())->interleave_hit++;
- put_cpu();
- }
+ if (page && page_zone(page) == zl->zones[0])
+ inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
return page;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6aa2c31f513..d61671260f9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -709,27 +709,6 @@ void drain_local_pages(void)
}
#endif /* CONFIG_PM */
-static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
-{
-#ifdef CONFIG_NUMA
- pg_data_t *pg = z->zone_pgdat;
- pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
- struct per_cpu_pageset *p;
-
- p = zone_pcp(z, cpu);
- if (pg == orig) {
- p->numa_hit++;
- } else {
- p->numa_miss++;
- zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
- }
- if (pg == NODE_DATA(numa_node_id()))
- p->local_node++;
- else
- p->other_node++;
-#endif
-}
-
/*
* Free a 0-order page
*/
@@ -827,7 +806,7 @@ again:
}
__mod_page_state_zone(zone, pgalloc, 1 << order);
- zone_statistics(zonelist, zone, cpu);
+ zone_statistics(zonelist, zone);
local_irq_restore(flags);
put_cpu();
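Note on the call-site change above: zone_statistics() no longer takes a cpu argument or reaches into the pcp structure, so its only requirement is that interrupts are disabled, which the allocator fast path already guarantees at that point. A minimal sketch of the resulting calling pattern (variable names are illustrative, mirroring the hunk above):

	unsigned long flags;
	int cpu = get_cpu();

	local_irq_save(flags);
	/* ... take the page from the per-cpu list or the buddy lists ... */
	__mod_page_state_zone(zone, pgalloc, 1 << order);
	zone_statistics(zonelist, zone);	/* interrupts are off here */
	local_irq_restore(flags);
	put_cpu();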
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 06a6d105219..ee7f8966625 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -185,9 +185,8 @@ EXPORT_SYMBOL(mod_zone_page_state);
* in between and therefore the atomicity vs. interrupt cannot be exploited
* in a useful way here.
*/
-void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
- struct zone *zone = page_zone(page);
s8 *p = diff_pointer(zone, item);
(*p)++;
@@ -197,6 +196,11 @@ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
*p = 0;
}
}
+
+void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+ __inc_zone_state(page_zone(page), item);
+}
EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -213,22 +217,23 @@ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
}
EXPORT_SYMBOL(__dec_zone_page_state);
+void inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __inc_zone_state(zone, item);
+ local_irq_restore(flags);
+}
+
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
unsigned long flags;
struct zone *zone;
- s8 *p;
zone = page_zone(page);
local_irq_save(flags);
- p = diff_pointer(zone, item);
-
- (*p)++;
-
- if (unlikely(*p > STAT_THRESHOLD)) {
- zone_page_state_add(*p, zone, item);
- *p = 0;
- }
+ __inc_zone_state(zone, item);
local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);
@@ -297,6 +302,28 @@ EXPORT_SYMBOL(refresh_vm_stats);
#endif
+#ifdef CONFIG_NUMA
+/*
+ * zonelist = the list of zones passed to the allocator
+ * z = the zone from which the allocation occurred.
+ *
+ * Must be called with interrupts disabled.
+ */
+void zone_statistics(struct zonelist *zonelist, struct zone *z)
+{
+ if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
+ __inc_zone_state(z, NUMA_HIT);
+ } else {
+ __inc_zone_state(z, NUMA_MISS);
+ __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
+ }
+ if (z->zone_pgdat == NODE_DATA(numa_node_id()))
+ __inc_zone_state(z, NUMA_LOCAL);
+ else
+ __inc_zone_state(z, NUMA_OTHER);
+}
+#endif
+
#ifdef CONFIG_PROC_FS
#include <linux/seq_file.h>
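As a concrete reading of zone_statistics() above: on a two-node machine, an allocation whose zonelist prefers node 0 but is satisfied from a zone on node 1 increments NUMA_MISS on the supplying node 1 zone and NUMA_FOREIGN on node 0's preferred zone; independently, NUMA_LOCAL or NUMA_OTHER records whether the supplying zone belongs to the node the allocating CPU is currently running on.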
@@ -369,6 +396,15 @@ static char *vmstat_text[] = {
"nr_unstable",
"nr_bounce",
+#ifdef CONFIG_NUMA
+ "numa_hit",
+ "numa_miss",
+ "numa_foreign",
+ "numa_interleave",
+ "numa_local",
+ "numa_other",
+#endif
+
/* Event counters */
"pgpgin",
"pgpgout",
@@ -490,21 +526,6 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
pageset->pcp[j].high,
pageset->pcp[j].batch);
}
-#ifdef CONFIG_NUMA
- seq_printf(m,
- "\n numa_hit: %lu"
- "\n numa_miss: %lu"
- "\n numa_foreign: %lu"
- "\n interleave_hit: %lu"
- "\n local_node: %lu"
- "\n other_node: %lu",
- pageset->numa_hit,
- pageset->numa_miss,
- pageset->numa_foreign,
- pageset->interleave_hit,
- pageset->local_node,
- pageset->other_node);
-#endif
}
seq_printf(m,
"\n all_unreclaimable: %u"