#! /bin/sh
patch -p1 -f $* < $0
exit $?

Patch to support the Viengoos specific scheduler.  To disable it, set
GC_viengoos_scheduler to 0.

diff -uprN -x '*.applied' -x config.guess -x '*~' -x autom4te.cache -x config.sub -x configure gc.orig/allchblk.c gc/allchblk.c
--- gc.orig/allchblk.c	2007-06-07 02:40:07.000000000 +0200
+++ gc/allchblk.c	2008-07-05 16:08:59.000000000 +0200
@@ -117,7 +117,9 @@ void GC_print_hblkfreelist()
     while (h != 0) {
         hhdr = HDR(h);
         sz = hhdr -> hb_sz;
-	GC_printf("\t%p size %lu ", h, (unsigned long)sz);
+	GC_printf("\t%p-%p size %lu %smapped, ",
+		  h, (word) h + sz - 1, (unsigned long)sz,
+		  IS_MAPPED(hhdr) ? "" : "un");
 	total_free += sz;
 	if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf("start black listed\n");
@@ -381,31 +383,135 @@ void GC_add_to_fl(struct hblk *h, hdr *h
 
 #ifdef USE_MUNMAP
 
+extern signed_word GC_bytes_found;
+extern int GC_viengoos_scheduler;
+
+#ifndef __gnu_hurd_viengoos__
+#define GC_available_bytes ((256 + 128) * 1024 * 1024)
+#endif
+
 /* Unmap blocks that haven't been recently touched. This is the only way */
 /* way blocks are ever unmapped.					  */
 void GC_unmap_old(void)
 {
     struct hblk * h;
     hdr * hhdr;
-    word sz;
     unsigned short last_rec, threshold;
     int i;
 #   define UNMAP_THRESHOLD 6
-
-    for (i = 0; i <= N_HBLK_FLS; ++i) {
+
+  start:
+    if (GC_viengoos_scheduler
+	&& (GC_get_heap_size() - GC_unmapped_bytes
+	    < 7 * (GC_available_bytes / 8))) {
+      /* The number of mapped bytes is at most 7/8s the available
+	 memory.  That's good enough for now.  (Recall: the high-water
+	 mark is 15/16s the available memory.)  */
+      if (0)
+	printf ("%x: After unmapping %d used (%d available)\n",
+		l4_myself (),
+		(GC_get_heap_size() - GC_unmapped_bytes) / 4096,
+		GC_available_bytes / 4096);
+      return;
+    }
+
+    /* Start with the large blocks and work our way down.  */
+    for (i = N_HBLK_FLS; i >= 0 ; --i) {
       for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
 	hhdr = HDR(h);
 	if (!IS_MAPPED(hhdr)) continue;
 	threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
 	last_rec = hhdr -> hb_last_reclaimed;
-	if ((last_rec > GC_gc_no || last_rec < threshold)
-	    && threshold < GC_gc_no /* not recently wrapped */) {
-	  sz = hhdr -> hb_sz;
-	  GC_unmap((ptr_t)h, sz);
-	  hhdr -> hb_flags |= WAS_UNMAPPED;
-	}
+
+	if (GC_viengoos_scheduler
+	    || ((last_rec > GC_gc_no || last_rec < threshold)
+		&& threshold < GC_gc_no /* not recently wrapped */)) {
+	  GC_unmap((ptr_t)h, hhdr -> hb_sz);
+	  hhdr -> hb_flags |= WAS_UNMAPPED;
+
+	  if (0)
+	    GC_printf ("Unmapped %p-%p(%x)\n",
+		       h, (struct hblk *)((word)h + hhdr -> hb_sz),
+		       hhdr -> hb_sz);
+
+	  if (GC_viengoos_scheduler) {
+	    GC_bool need_add = FALSE;
+
+	    /* Also unmap all adjacent blocks.  */
+	    int dir;
+	    for (dir = 0; dir < 2; dir ++)
+	      while (1) {
+		struct hblk *next =
+		  dir == 0 ? (struct hblk *)((word)h + hhdr -> hb_sz)
+		  : GC_free_block_ending_at (h);
+
+		if (! next)
+		  break;
+
+		hdr * nexthdr;
+		GET_HDR(next, nexthdr);
+
+		if (0 == nexthdr || ! HBLK_IS_FREE(nexthdr))
+		  break;
+
+		if (0)
+		  GC_printf ("Merging %p-%p(%x) with %p-%p(%x)\n",
+			     h, (struct hblk *)((word)h + hhdr -> hb_sz),
+			     hhdr -> hb_sz,
+			     next,
+			     (struct hblk *)((word)next + nexthdr -> hb_sz),
+			     nexthdr -> hb_sz);
+
+		/* Remove from free list.  */
+		GC_remove_from_fl(nexthdr, FL_UNKNOWN);
+
+		if (! need_add) {
+		  need_add = TRUE;
+		  GC_remove_from_fl(hhdr, i);
+		}
+
+		/* Unmap if not already unmapped.  */
+		if (IS_MAPPED (nexthdr)) {
+		  GC_unmap((ptr_t)next, nexthdr -> hb_sz);
+		  nexthdr -> hb_flags |= WAS_UNMAPPED;
+		}
+
+		/* Lexically order: make H the lower-addressed
+		   block, so NEXT always follows it in memory.  */
+		if (dir == 1) {
+		  void* t = hhdr;
+		  hhdr = nexthdr;
+		  nexthdr = t;
+
+		  t = h;
+		  h = next;
+		  next = t;
+		}
+
+		GC_remove_header(next);
+
+		/* Unmap any gap in the middle.  */
+		GC_unmap_gap((ptr_t)h, hhdr -> hb_sz,
+			     (ptr_t)next, nexthdr -> hb_sz);
+
+		hhdr -> hb_sz += nexthdr -> hb_sz;
+	      }
+
+	    if (need_add)
+	      /* We removed H from the free lists because its size
+		 changed.  Add it back.  */
+	      GC_add_to_fl(h, hhdr);
+
+	    /* We've munged with the lists.  Start from the
+	       beginning.  */
+	    goto start;
+	  }
+	}
       }
-  }
+    }
 }
 
 /* Merge all unmapped blocks that are adjacent to other free */
@@ -413,6 +519,12 @@ void GC_unmap_old(void)
 /* fully mapped or fully unmapped. */
 void GC_merge_unmapped(void)
 {
+    if (GC_viengoos_scheduler)
+      /* We did some merging in GC_unmap_old.  This is mostly extra
+	 work for us as it undoes the work we did and changes the
+	 number of unmapped pages!  */
+      return;
+
     struct hblk * h, *next;
     hdr * hhdr, *nexthdr;
     word size, nextsize;
@@ -600,12 +712,26 @@ GC_allochblk_nth(size_t sz, int kind, un
 
     size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
 
+    int try;
+    GC_bool saw_unmapped = FALSE;
+    for (try = 0; try < 2; try ++) {
+      if (try == 1 && ! saw_unmapped) break;
+
     /* search for a big enough block in free list */
 	hbp = GC_hblkfreelist[n];
 	for(; 0 != hbp; hbp = hhdr -> hb_next) {
 	    GET_HDR(hbp, hhdr);
+
 	    size_avail = hhdr->hb_sz;
 	    if (size_avail < size_needed) continue;
+
+	    /* First time through, ignore unmapped blocks (but note
+	       that we saw one that is large enough).  */
+	    if (try == 0 && ! IS_MAPPED(hhdr)) {
+	      saw_unmapped = TRUE;
+	      continue;
+	    }
+
 	    if (size_avail != size_needed
 		&& !GC_use_entire_heap
 		&& !GC_dont_gc
@@ -642,7 +768,7 @@ GC_allochblk_nth(size_t sz, int kind, un
 	      next_size = (signed_word)(thishdr -> hb_sz);
 	      if (next_size < size_avail
 	          && next_size >= size_needed
-	          && !GC_is_black_listed(thishbp, (word)size_needed)) {
+		  && !GC_is_black_listed(thishbp, (word)size_needed)) {
 		  continue;
 	      }
 	  }
@@ -748,10 +874,12 @@ GC_allochblk_nth(size_t sz, int kind, un
 	    /* hbp may be on the wrong freelist; the parameter n */
 	    /* is important. */
 	    hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
-	    break;
+	    goto break_out;
 	}
     }
 
+    }
+  break_out:
     if (0 == hbp) return 0;
 
     /* Add it to map of valid blocks */
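The two-direction coalescing loop that the hunk above adds to GC_unmap_old is
the heart of the allchblk.c change.  As a reading aid only, this standalone C
sketch (hypothetical names; it is not part of the patch) mirrors the dir loop:
grow an extent forward over free neighbours, then backward, just as the patch
does with GC_free_block_ending_at:

  #include <stdio.h>
  #include <stddef.h>

  /* A toy address space of equal-sized blocks; is_free[i] marks block i
     free.  merge_around(i) returns the extent [lo, hi) of free blocks
     reachable from block i by walking forward (dir 0) and then backward
     (dir 1), the same two-direction walk as the patched GC_unmap_old.  */
  static size_t merge_around(const int *is_free, size_t n, size_t i,
                             size_t *lo_out, size_t *hi_out)
  {
    size_t lo = i, hi = i + 1;          /* current extent, like h/hb_sz */
    int dir;
    for (dir = 0; dir < 2; dir++)
      for (;;) {
        if (dir == 0) {                 /* forward: block just after extent */
          if (hi < n && is_free[hi]) { hi++; continue; }
        } else {                        /* backward: block ending at extent */
          if (lo > 0 && is_free[lo - 1]) { lo--; continue; }
        }
        break;
      }
    *lo_out = lo;
    *hi_out = hi;
    return hi - lo;
  }

  int main(void)
  {
    int is_free[] = { 0, 1, 1, 1, 0, 1 };
    size_t lo, hi;
    size_t len = merge_around(is_free, 6, 2, &lo, &hi);
    printf("merged extent: [%zu, %zu), %zu blocks\n", lo, hi, len); /* [1,4), 3 */
    return 0;
  }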
diff -uprN -x '*.applied' -x config.guess -x '*~' -x autom4te.cache -x config.sub -x configure gc.orig/alloc.c gc/alloc.c
--- gc.orig/alloc.c	2007-06-22 04:40:30.000000000 +0200
+++ gc/alloc.c	2008-07-08 16:52:55.000000000 +0200
@@ -15,6 +15,13 @@
  *
  */
 
+#ifdef __gnu_hurd_viengoos__
+#include <viengoos/activity.h>	/* NB: include names were lost in this copy; inferred from use.  */
+#include <pthread.h>
+#endif
+#include <stdint.h>
+#include <assert.h>
+#include <sys/time.h>
 
 # include "private/gc_priv.h"
 
@@ -24,6 +31,8 @@
 #   include <sys/types.h>
 # endif
 
+#include "profile.h"
+
 /*
  * Separate free lists are maintained for different sized objects
  * up to MAXOBJBYTES.
@@ -223,9 +232,134 @@ void GC_clear_a_few_frames()
 /* limits used by blacklisting.						*/
 static word GC_collect_at_heapsize = (word)(-1);
 
+int GC_viengoos_scheduler = 1;
+int GC_available_bytes = (256 + 128) * 1024 * 1024;
+int GC_could_unmap;
+
+#define THRESHOLD (15 * (GC_available_bytes / 16))
+
+#ifdef __gnu_hurd_viengoos__
+static void *
+gather_stats (void *arg)
+{
+  pthread_detach (pthread_self ());
+
+  int period = 0;
+  for (;;)
+    {
+      struct vg_activity_info info;
+      error_t err = vg_activity_info (ACTIVITY,
+				      vg_activity_info_stats
+				      | vg_activity_info_pressure,
+				      period, &info);
+      assert_perror (err);
+
+      switch (info.event)
+	{
+	case vg_activity_info_stats:
+	  GC_available_bytes = info.stats.stats[0].available_local * PAGESIZE;
+	  period = info.stats.stats[0].period + 1;
+	  break;
+
+	case vg_activity_info_pressure:
+	  if (-info.pressure.amount * PAGESIZE < GC_available_bytes)
+	    GC_available_bytes -= -info.pressure.amount * PAGESIZE;
+	  else
+	    /* Huh?  */
+	    GC_available_bytes = 0;
+	  break;
+
+	default:
+	  panic ("Unknown or unrequested event: %d", info.event);
+	}
+
+      if (0)
+	printf ("%x: %s: %d alloced: %d, heap: %d, "
+		"mapped:%d, unmapped: %d, available: %d, "
+		"low-water: %d\n",
+		l4_myself (),
+		info.event == vg_activity_info_stats
+		? "Period" : DEBUG_BOLD ("PRESSURE"),
+		(int) (info.event == vg_activity_info_stats
+		       ? period : info.pressure.amount),
+		(int) GC_adj_bytes_allocd() / PAGESIZE,
+		(int) GC_get_heap_size () / PAGESIZE,
+		(int) (GC_get_heap_size () - GC_unmapped_bytes) / PAGESIZE,
+		(int) GC_unmapped_bytes / PAGESIZE,
+		(int) GC_available_bytes / PAGESIZE,
+		(int) THRESHOLD / PAGESIZE);
+    }
+}
+#endif
+
 /* Have we allocated enough to amortize a collection? */
 GC_bool GC_should_collect(void)
 {
+#ifndef USE_MUNMAP
+# define GC_unmapped_bytes 0
+#endif
+
+    if (GC_viengoos_scheduler)
+      {
+	int alloced = GC_adj_bytes_allocd();
+#ifdef __gnu_hurd_viengoos__
+	static int init;
+	if (! init)
+	  {
+	    pthread_t tid;
+	    error_t err = pthread_create (&tid, NULL, gather_stats, NULL);
+	    assert_perror (err);
+
+	    init = 1;
+	  }
+#endif
+
+	/* Do some clean up if the mapped memory is more than 15/16s
+	   the available memory.  We choose 15/16s as we need to
+	   consider meta-data overhead and as we really want to avoid
+	   the system pager kicking in.  */
+	GC_bool r = GC_get_heap_size() - GC_unmapped_bytes > THRESHOLD;
+
+	if (r && (GC_get_heap_size () - GC_unmapped_bytes - alloced
+		  > GC_available_bytes / 3)) {
+	  /* The number of unused mapped bytes is greater than a third
+	     of the total available memory.  Before doing a GC, try to
+	     unmap some free mapped pages.  */
+	  GC_unmap_old ();
+
+	  r = GC_get_heap_size() - GC_unmapped_bytes > THRESHOLD;
+	}
+
+	if (r) {
+	  if (alloced < GC_available_bytes / 100)
+	    /* We have allocated almost nothing since the last
+	       collection; a collection would not pay off yet.  */
+	    return FALSE;
+
+	  static int warning;
+	  /* Only print this once per GC.  GC_allochblk_nth calls this
+	     function repeatedly without immediately following up with
+	     a GC.
+	  */
+	  if (warning != GC_gc_no) {
+	    warning = GC_gc_no;
+	    if (0)
+	      printf ("Scheduling GC: (%u) alloced: %dkb, heap: %dkb, "
+		      "mapped: %dkb, unmapped: %dkb, available: %dkb, "
+		      "low-water: %dkb\n",
+		      (unsigned) GC_gc_no,
+		      (int) alloced / 1024,
+		      (int) GC_get_heap_size () / 1024,
+		      (int) (GC_get_heap_size () - GC_unmapped_bytes)
+		      / 1024,
+		      (int) GC_unmapped_bytes / 1024,
+		      (int) GC_available_bytes / 1024,
+		      (int) (THRESHOLD / 1024));
+	  }
+	}
+
+	return r;
+      }
+    else
     return(GC_adj_bytes_allocd() >= min_bytes_allocd()
 	   || GC_heapsize >= GC_collect_at_heapsize);
 }
@@ -256,6 +390,8 @@ void GC_maybe_gc(void)
 	    n_partial_gcs = 0;
 	    return;
 	} else {
+	    profile_region (GC_TIMER);
+
 #   	  ifdef PARALLEL_MARK
 	    GC_wait_for_reclaim();
 #   	  endif
@@ -266,6 +402,7 @@ void GC_maybe_gc(void)
 			  (unsigned long)GC_gc_no+1, (long)GC_bytes_allocd);
 	    }
+
 	    GC_promote_black_lists();
 	    (void)GC_reclaim_all((GC_stop_func)0, TRUE);
 	    GC_clear_marks();
@@ -275,7 +412,11 @@ void GC_maybe_gc(void)
 	    } else {
 	        n_partial_gcs++;
 	    }
+
+	    profile_region_end ();
 	}
+
+	profile_region (GC_TIMER);
 	/* We try to mark with the world stopped.	*/
 	/* If we run out of time, this turns into	*/
 	/* incremental marking.				*/
@@ -294,6 +435,8 @@ void GC_maybe_gc(void)
 	        GC_n_attempts++;
 	    }
 	}
+
+	profile_region_end ();
     }
 }
 
@@ -325,6 +468,10 @@ GC_bool GC_try_to_collect_inner(GC_stop_
 	    "Initiating full world-stop collection %lu after %ld allocd bytes\n",
 	    (unsigned long)GC_gc_no+1, (long)GC_bytes_allocd);
     }
+
+    GC_bool ret = TRUE;
+    profile_region (GC_TIMER);
+
     GC_promote_black_lists();
     /* Make sure all blocks have been reclaimed, so sweep routines	*/
     /* don't see cleared mark bits.					*/
@@ -337,7 +484,8 @@ GC_bool GC_try_to_collect_inner(GC_stop_
 	if ((GC_find_leak || stop_func != GC_never_stop_func)
 	    && !GC_reclaim_all(stop_func, FALSE)) {
 	    /* Aborted.  So far everything is still consistent. */
-	    return(FALSE);
+	    ret = FALSE;
+	    goto out;
 	}
     GC_invalidate_mark_state();  /* Flush mark stack.	*/
     GC_clear_marks();
@@ -354,7 +502,8 @@ GC_bool GC_try_to_collect_inner(GC_stop_
 	    GC_unpromote_black_lists();
 	} /* else we claim the world is already still consistent.  We'll */
 	  /* finish incrementally.					  */
-	return(FALSE);
+	ret = FALSE;
+	goto out;
     }
     GC_finish_collection();
     if (GC_print_stats) {
 	GET_TIME(current_time);
 	GC_log_printf("Complete collection took %lu msecs\n",
 		      MS_TIME_DIFF(current_time,start_time));
     }
-    return(TRUE);
+
+  out:
+    profile_region_end ();
+    return ret;
 }
 
@@ -390,6 +542,8 @@ void GC_collect_a_little_inner(int n)
 
     if (GC_dont_gc) return;
     if (GC_incremental && GC_collection_in_progress()) {
+	profile_region (GC_TIMER);
+
 	for (i = GC_deficit; i < GC_RATE*n; i++) {
 	    if (GC_mark_some((ptr_t)0)) {
 		/* Need to finish a collection */
@@ -415,6 +569,8 @@ void GC_collect_a_little_inner(int n)
 	}
 	if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
 	if (GC_deficit < 0) GC_deficit = 0;
+
+	profile_region_end ();
     } else {
 	GC_maybe_gc();
     }
@@ -904,6 +1060,8 @@ GC_bool GC_expand_hp_inner(word n)
     /* Force GC before we are likely to allocate past expansion_slop */
     GC_collect_at_heapsize =
 	GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
+    GC_collect_at_heapsize =
+	GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
 #   if defined(LARGE_CONFIG)
       if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
          GC_collect_at_heapsize = (word)(-1);
@@ -1021,3 +1179,146 @@ ptr_t GC_allocobj(size_t gran, int kind)
 
     return(*flh);
 }
+
+
+void
+GC_dump_stats (void)
+{
+  extern uint64_t gc_map_time;
+
+  printf ("GC: "
+#ifdef THREADS
+	  "multi-threaded"
+#else
+	  "single threaded"
+#endif
+	  ", %s, "
+#ifndef PARALLEL_MARK
+	  "non-"
+#endif
+	  "parallel mark, dirty bits %smaintained\n",
+	  TRUE_INCREMENTAL ? "true incremental" :
+	  GC_incremental ? "generational" : "stop the world",
+	  GC_dirty_maintained ? "" : "not ");
+  printf("%d collections\n", (int) GC_gc_no);
+  printf("Heap size: %d (%d kb)\n",
+	 (int) GC_get_heap_size(), (int) (GC_get_heap_size() / 1024));
+  printf("Total bytes allocated: %d (%d kb)\n",
+	 (int) GC_get_total_bytes (), (int) (GC_get_total_bytes () / 1024));
+
+  profile_stats_dump ();
+}
+
+#ifndef __gnu_hurd_viengoos__
+/* profile.c - Profiling support implementation.
+   Copyright (C) 2008 Free Software Foundation, Inc.
+   Written by Neal H. Walfield <neal@gnu.org>.
+
+   This file is part of the GNU Hurd.
+
+   The GNU Hurd is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 3 of the
+   License, or (at your option) any later version.
+
+   The GNU Hurd is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define SIZE 10
+struct site
+{
+  const char *name;
+  uint64_t time;
+  uint64_t start;
+  int calls;
+  int pending;
+} sites[SIZE];
+
+static uint64_t epoch;
+static uint64_t calls;
+static uint64_t total_time;
+/* Number of extant profiling calls.  We only update TOTAL_TIME if
+   EXTANT is 0.  The result is that the time spent profiling is
+   correct, and the percentage of the profiled time attributed to a
+   function is more meaningful.  */
+static int extant;
+
+static inline uint64_t
+now (void)
+{
+  struct timeval t;
+  struct timezone tz;
+
+  if (gettimeofday (&t, &tz) == -1)
+    return 0;
+  return (t.tv_sec * 1000000ULL + t.tv_usec);
+}
+
+void
+profile_stats_dump (void)
+{
+  uint64_t n = now ();
+
+  int i;
+  for (i = 0; i < SIZE; i ++)
+    if (sites[i].calls)
+      printf ("%s:\t%d calls,\t%lld ms,\t%lld.%d us per call,\t"
+	      "%d%% total time,\t%d%% profiled time\n",
+	      sites[i].name,
+	      sites[i].calls,
+	      sites[i].time / 1000,
+	      sites[i].time / sites[i].calls,
+	      (int) ((10 * sites[i].time) / sites[i].calls) % 10,
+	      (int) ((100 * sites[i].time) / (n - epoch)),
+	      (int) ((100 * sites[i].time) / total_time));
+
+  printf ("profiled time: %lld ms, calls: %lld\n",
+	  total_time / 1000, calls);
+  printf ("uptime: %lld ms\n", (n - epoch) / 1000);
+}
+
+void
+profile_start (uintptr_t id, const char *name)
+{
+  if (! epoch)
+    epoch = now ();
+
+  struct site *site = &sites[id % SIZE];	/* ID is an address; fold it into the small table.  */
+  site->name = name;
+
+  extant ++;
+
+  site->pending ++;
+  if (site->pending == 1)
+    site->start = now ();
+}
+
+void
+profile_end (uintptr_t id)
+{
+  struct site *site = &sites[id % SIZE];
+  assert (site->pending);
+
+  extant --;
+
+  site->pending --;
+  if (site->pending == 0)
+    {
+      uint64_t n = now ();
+
+      site->time += n - site->start;
+
+      if (extant == 0)
+	total_time += n - site->start;
+
+      site->calls ++;
+      calls ++;
+    }
+}
+#endif
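To make the thresholds in GC_should_collect concrete: with the default of
(256 + 128) MB available, THRESHOLD is 15/16 of that, i.e. 360 MB.  The
following standalone sketch (hypothetical names; it elides the GC_unmap_old
retry that the real code performs, and is not part of the patch) reproduces
the decision rule:

  #include <stdio.h>

  /* The patched GC_should_collect in a nutshell: collect when mapped
     memory exceeds 15/16 of what the system reports as available, but
     skip the collection when almost nothing was allocated since the
     last one.  */
  static int should_collect(long heap, long unmapped, long alloced,
                            long available)
  {
    long mapped = heap - unmapped;
    long threshold = 15 * (available / 16);   /* high-water mark */

    if (mapped <= threshold)
      return 0;                               /* plenty of room */
    if (mapped - alloced > available / 3) {
      /* Mostly idle mapped memory: the real code first calls
         GC_unmap_old() here and re-tests the threshold.  */
    }
    if (alloced < available / 100)
      return 0;                               /* collection wouldn't pay off */
    return 1;
  }

  int main(void)
  {
    long mb = 1024 * 1024;
    /* 384 MB available (the patch's default), 372 MB mapped, 10 MB
       allocated since the last GC: 372 > 360 = 15/16 of 384, and
       10 MB > 3.84 MB, so a collection is scheduled.  */
    printf("collect? %d\n",
           should_collect(380 * mb, 8 * mb, 10 * mb, 384 * mb));
    return 0;
  }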
diff -uprN -x '*.applied' -x config.guess -x '*~' -x autom4te.cache -x config.sub -x configure gc.orig/include/private/gcconfig.h gc/include/private/gcconfig.h
--- gc.orig/include/private/gcconfig.h	2008-07-08 16:47:51.000000000 +0200
+++ gc/include/private/gcconfig.h	2008-07-02 16:17:26.000000000 +0200
@@ -1087,6 +1087,8 @@
 #   ifdef LINUX
 #	define OS_TYPE "LINUX"
 #	define LINUX_STACKBOTTOM
+#	define USE_MMAP
+#	define USE_MUNMAP
 #	if 0
 #	  define HEURISTIC1
 #	  undef STACK_GRAN
@@ -1278,6 +1280,12 @@
 #	define DATAEND ((ptr_t) (_end))
 /* #	define MPROTECT_VDB Not quite working yet? */
 #	undef DYNAMIC_LOADING
+
+#   include <viengoos/addr.h>	/* NB: include name was lost in this copy; the use of vg_addr_t below suggests it.  */
+extern vg_addr_t gc_activity __attribute__ ((weak));
+# define ACTIVITY (&gc_activity ? gc_activity : VG_ADDR_VOID)
+extern int GC_available_bytes;
+
 #   endif
 #   ifdef DARWIN
 #	define OS_TYPE "DARWIN"
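The ACTIVITY macro above relies on gc_activity being a weak reference: if the
program linked against the collector never defines it, &gc_activity is a null
address and the macro falls back to VG_ADDR_VOID.  A minimal sketch of the
same linker trick, with stand-in types since the Viengoos headers are not
assumed here (hypothetical names, not part of the patch):

  #include <stdio.h>

  /* Stand-ins for vg_addr_t and VG_ADDR_VOID.  */
  typedef struct { unsigned long raw; } addr_t;
  #define ADDR_VOID ((addr_t) { 0 })

  /* Weak reference: links fine even with no definition anywhere.  */
  extern addr_t gc_activity __attribute__ ((weak));
  #define ACTIVITY (&gc_activity ? gc_activity : ADDR_VOID)

  int main(void)
  {
    /* With no definition of gc_activity linked in, &gc_activity is
       null, so ACTIVITY evaluates to ADDR_VOID and the undefined
       symbol is never actually read.  */
    printf("activity: %lx\n", ACTIVITY.raw);
    return 0;
  }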
diff -uprN -x '*.applied' -x config.guess -x '*~' -x autom4te.cache -x config.sub -x configure gc.orig/os_dep.c gc/os_dep.c
--- gc.orig/os_dep.c	2007-06-29 21:17:44.000000000 +0200
+++ gc/os_dep.c	2008-07-08 16:39:31.000000000 +0200
@@ -2064,6 +2064,18 @@ ptr_t GC_unmap_end(ptr_t start, size_t b
     return end_addr;
 }
 
+#ifdef __gnu_hurd_viengoos__
+#include <sys/mman.h>	/* NB: include names were lost in this copy; inferred from use.  */
+#include <viengoos/addr.h>
+#include <viengoos/activity.h>
+
+extern vg_addr_t gc_activity __attribute__ ((weak));
+#define ACTIVITY (&gc_activity ? gc_activity : VG_ADDR_VOID)
+
+#endif
+
+#include "profile.h"
+
 /* Under Win32/WinCE we commit (map) and decommit (unmap)	*/
 /* memory using VirtualAlloc and VirtualFree.  These functions	*/
 /* work on individual allocations of virtual memory, made	*/
@@ -2077,10 +2089,12 @@ ptr_t GC_unmap_end(ptr_t start, size_t b
 /* round the endpoints in both places.				*/
 void GC_unmap(ptr_t start, size_t bytes)
 {
+    profile_region (MAP_TIMER);
+
     ptr_t start_addr = GC_unmap_start(start, bytes);
     ptr_t end_addr = GC_unmap_end(start, bytes);
     word len = end_addr - start_addr;
-    if (0 == start_addr) return;
+    if (0 == start_addr) goto out;
 #   if defined(MSWIN32) || defined(MSWINCE)
       while (len != 0) {
           MEMORY_BASIC_INFORMATION mem_info;
@@ -2095,6 +2109,9 @@ void GC_unmap(ptr_t start, size_t bytes)
 	  start_addr += free_len;
 	  len -= free_len;
       }
+#   elif defined(__gnu_hurd_viengoos__)
+      madvise (start, bytes, POSIX_MADV_DONTNEED);
+      GC_unmapped_bytes += len;
 #   else
       /* We immediately remap it to prevent an intervening mmap from	*/
       /* accidentally grabbing the same address space.			*/
@@ -2107,19 +2124,25 @@ void GC_unmap(ptr_t start, size_t bytes)
 	}
 	GC_unmapped_bytes += len;
 #   endif
+
+  out:
+    profile_region_end ();
 }
 
 void GC_remap(ptr_t start, size_t bytes)
 {
+    profile_region (MAP_TIMER);
+
     ptr_t start_addr = GC_unmap_start(start, bytes);
     ptr_t end_addr = GC_unmap_end(start, bytes);
     word len = end_addr - start_addr;
 
+    if (0 == start_addr) goto out;
+
 #   if defined(MSWIN32) || defined(MSWINCE)
       ptr_t result;
 
-      if (0 == start_addr) return;
       while (len != 0) {
           MEMORY_BASIC_INFORMATION mem_info;
 	  GC_word alloc_len;
@@ -2137,11 +2160,14 @@ void GC_remap(ptr_t start, size_t bytes)
 	  start_addr += alloc_len;
 	  len -= alloc_len;
       }
+#   elif defined(__gnu_hurd_viengoos__)
+      /* Nothing to do.  We already discarded it and the next access
+	 will fault it in.  */
+      GC_unmapped_bytes -= len;
 #   else
       /* It was already remapped with PROT_NONE. */
       int result;
 
-      if (0 == start_addr) return;
       result = mprotect(start_addr, len,
 		        PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
       if (result != 0) {
@@ -2152,6 +2178,9 @@ void GC_remap(ptr_t start, size_t bytes)
 	}
 	GC_unmapped_bytes -= len;
 #   endif
+
+  out:
+    profile_region_end ();
 }
 
 /* Two adjacent blocks have already been unmapped and are about to */
@@ -2160,6 +2189,8 @@ void GC_remap(ptr_t start, size_t bytes)
 /* unmapped due to alignment constraints.			   */
 void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2)
 {
+    profile_region (MAP_TIMER);
+
     ptr_t start1_addr = GC_unmap_start(start1, bytes1);
     ptr_t end1_addr = GC_unmap_end(start1, bytes1);
     ptr_t start2_addr = GC_unmap_start(start2, bytes2);
@@ -2170,7 +2201,7 @@ void GC_unmap_gap(ptr_t start1, size_t b
     GC_ASSERT(start1 + bytes1 == start2);
     if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
     if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
-    if (0 == start_addr) return;
+    if (0 == start_addr) goto out;
     len = end_addr - start_addr;
 #   if defined(MSWIN32) || defined(MSWINCE)
       while (len != 0) {
@@ -2190,6 +2221,9 @@ void GC_unmap_gap(ptr_t start1, size_t b
       if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
       GC_unmapped_bytes += len;
 #   endif
+
+  out:
+    profile_region_end ();
 }
 
 #endif /* USE_MUNMAP */
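On Viengoos, GC_unmap above discards pages with madvise instead of remapping
them PROT_NONE, which is why GC_remap becomes bookkeeping only: the next
access simply faults fresh pages back in.  A small sketch of that discard
protocol, using the portable posix_madvise spelling (an assumption on my
part; the patch itself calls madvise with POSIX_MADV_DONTNEED from
Viengoos' libc):

  #include <sys/mman.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
    size_t len = 1 << 20;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return 1;

    memset(p, 0xaa, len);                       /* dirty the pages */
    posix_madvise(p, len, POSIX_MADV_DONTNEED); /* "unmap": pages may be
                                                   reclaimed by the system */

    /* "remap" costs nothing: touching the range faults pages back in,
       so the address range stays valid throughout.  */
    p[0] = 1;
    printf("still mapped, first byte: %d\n", p[0]);
    munmap(p, len);
    return 0;
  }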
diff -uprN -x '*.applied' -x config.guess -x '*~' -x autom4te.cache -x config.sub -x configure gc.orig/profile.h gc/profile.h
--- gc.orig/profile.h	1970-01-01 01:00:00.000000000 +0100
+++ gc/profile.h	2008-07-08 16:29:27.000000000 +0200
@@ -0,0 +1,53 @@
+/* profile.h - Profiling support interface.
+   Copyright (C) 2008 Free Software Foundation, Inc.
+   Written by Neal H. Walfield <neal@gnu.org>.
+
+   This file is part of the GNU Hurd.
+
+   The GNU Hurd is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 3 of the
+   License, or (at your option) any later version.
+
+   The GNU Hurd is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <stdint.h>	/* NB: include name was lost in this copy; uintptr_t needs it.  */
+
+#define GC_TIMER "gc"
+#define MAP_TIMER "map"
+
+#ifndef NPROFILE
+
+/* Start a timer for the profile site ID (this must be unique per
+   site; it can be the function's address).  NAME is the symbolic
+   name.  */
+extern void profile_start (uintptr_t id, const char *name);
+
+/* End the timer for the profile site ID.  */
+extern void profile_end (uintptr_t id);
+
+extern void profile_stats_dump (void);
+
+#else
+
+#define profile_start(id, name) do { } while (0)
+#define profile_end(id) do { } while (0)
+#define profile_stats_dump() do { } while (0)
+
+#endif
+
+#define profile_region(__pr_id) \
+  { \
+    const char *__pr_id_ = (__pr_id); \
+    profile_start((uintptr_t) __pr_id_, __pr_id_)
+
+#define profile_region_end() \
+    profile_end ((uintptr_t) __pr_id_); \
+  }
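For reference, the profile_region/profile_region_end pair is block-structured:
profile_region opens a brace and declares __pr_id_, and profile_region_end
closes that brace, so the two must nest like a block, and early exits should
jump to a label placed just before profile_region_end, exactly as the patched
GC_unmap and GC_try_to_collect_inner do.  A minimal usage sketch, compiled
against the patched tree (the do_work function is hypothetical, not part of
the patch):

  #include <stdint.h>
  #include "profile.h"   /* the header added above */

  /* Time everything between profile_region and profile_region_end
     under the "map" timer.  */
  static int do_work(int fail)
  {
    int ret = 0;               /* declared outside the region's block */
    profile_region (MAP_TIMER);  /* opens a block; declares __pr_id_ */

    if (fail) {
      ret = -1;
      goto out;                /* early exit stays inside the region */
    }
    /* ... real work ... */

   out:
    profile_region_end ();     /* stops the timer and closes the block */
    return ret;
  }

  int main(void)
  {
    do_work(0);
    do_work(1);
    profile_stats_dump ();     /* prints per-site call counts and times */
    return 0;
  }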