path: root/nscd/nscd_helper.c
author      Jakub Jelinek <jakub@redhat.com>    2009-05-18 10:19:50 -0700
committer   Ulrich Drepper <drepper@redhat.com> 2009-05-18 10:19:50 -0700
commit      5078fff6c4bc1c71c5d558ff7ec4775aa48b0c11 (patch)
tree        1cfd1a12546d845b6a06b73871e678b51ea8629c /nscd/nscd_helper.c
parent      e20c4ef0ef9075f80ca560f6bdd5ed3229673067 (diff)
Fix forced loop termination in nscd database lookup.
There are two issues with the forced loop exit in the nscd lookup:
1. The estimate of the entry size isn't pessimistic enough for all databases, potentially resulting in too-early exits.
2. The combination of a 64-bit process and a 32-bit nscd would lead to rejecting valid records in the database.
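For illustration, here is a minimal sketch of the second problem. The struct layout and names are hypothetical, not the actual glibc definitions: the point is only that a trailing in-process pointer changes sizeof between 32-bit and 64-bit builds, while an offsetof-based lower bound over the fixed-width fields the lookup actually needs does not.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical cache-entry layout: the lookup only needs the fixed-width
   fields; the trailing pointer is used inside nscd itself and makes
   sizeof differ between a 32-bit nscd and a 64-bit client.  */
struct example_hashentry
{
  int32_t type;        /* kind of cached request */
  int32_t len;         /* length of the key */
  int32_t key;         /* offset of the key in the mapping */
  int32_t packet;      /* offset of the cached response */
  void *dellist;       /* in-process pointer, 4 vs. 8 bytes */
};

/* Lower bound on the entry size as stored in the cache, independent of
   the pointer width of the process that wrote it.  */
#define EXAMPLE_MINIMUM_HASHENTRY_SIZE \
  (offsetof (struct example_hashentry, dellist) + sizeof (int32_t))

int
main (void)
{
  /* For this layout sizeof is 20 on a 32-bit build and 24 on a 64-bit
     build; the offsetof-based minimum is 20 on both.  */
  printf ("sizeof = %zu, minimum = %zu\n",
	  sizeof (struct example_hashentry),
	  (size_t) EXAMPLE_MINIMUM_HASHENTRY_SIZE);
  return 0;
}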
Diffstat (limited to 'nscd/nscd_helper.c')
-rw-r--r--  nscd/nscd_helper.c  16
1 file changed, 13 insertions, 3 deletions
diff --git a/nscd/nscd_helper.c b/nscd/nscd_helper.c
index c09f00859e..fe63f9a7fe 100644
--- a/nscd/nscd_helper.c
+++ b/nscd/nscd_helper.c
@@ -468,6 +468,15 @@ __nscd_get_map_ref (request_type type, const char *name,
}
+/* Using sizeof (hashentry) is not always correct to determine the size of
+ the data structure as found in the nscd cache. The program could be
+ a 64-bit process and nscd could be a 32-bit process. In this case
+ sizeof (hashentry) would overestimate the size. The following is
+ the minimum size of such an entry, good enough for our tests here. */
+#define MINIMUM_HASHENTRY_SIZE \
+ (offsetof (struct hashentry, dellist) + sizeof (int32_t))
+
+
/* Don't return const struct datahead *, as even though the record
is normally constant, it can change arbitrarily during nscd
garbage collection. */
@@ -481,10 +490,11 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
ref_t trail = mapped->head->array[hash];
trail = atomic_forced_read (trail);
ref_t work = trail;
- size_t loop_cnt = datasize / (offsetof (struct datahead, data) + datalen);
+ size_t loop_cnt = datasize / (MINIMUM_HASHENTRY_SIZE
+ + offsetof (struct datahead, data) / 2);
int tick = 0;
- while (work != ENDREF && work + sizeof (struct hashentry) <= datasize)
+ while (work != ENDREF && work + MINIMUM_HASHENTRY_SIZE <= datasize)
{
struct hashentry *here = (struct hashentry *) (mapped->data + work);
ref_t here_key, here_packet;
@@ -541,7 +551,7 @@ __nscd_cache_search (request_type type, const char *key, size_t keylen,
return NULL;
#endif
- if (trail + sizeof (struct hashentry) > datasize)
+ if (trail + MINIMUM_HASHENTRY_SIZE > datasize)
return NULL;
trail = atomic_forced_read (trailelem->next);
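As a rough sketch of the bounding technique the hunks above apply (hypothetical names and sizes, not the glibc implementation): entries are addressed by byte offsets into a shared mapping that nscd may rewrite at any time, so the walk checks every offset against the mapping size and caps the number of iterations with a pessimistic per-entry size, guaranteeing termination even on a corrupted or circular chain.

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_ENDREF ((uint32_t) -1)      /* end-of-chain marker */
#define EXAMPLE_MIN_ENTRY_SIZE 16u          /* pessimistic per-entry lower bound */

struct example_entry
{
  uint32_t next;   /* offset of the next entry in the chain, or ENDREF */
  /* a real entry would also carry key and packet offsets */
};

/* Count the entries reachable from FIRST without ever trusting the data:
   stop at the end marker, at an offset that cannot hold a whole entry,
   or after more steps than the mapping could possibly contain.  */
size_t
example_walk_chain (const char *data, size_t datasize, uint32_t first)
{
  size_t loop_cnt = datasize / EXAMPLE_MIN_ENTRY_SIZE;
  size_t visited = 0;
  uint32_t work = first;

  while (work != EXAMPLE_ENDREF
	 && work + EXAMPLE_MIN_ENTRY_SIZE <= datasize
	 && loop_cnt-- > 0)
    {
      const struct example_entry *here
	= (const struct example_entry *) (data + work);

      ++visited;               /* a real lookup would compare keys here */
      work = here->next;       /* may be garbage; checked next iteration */
    }

  return visited;
}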