GCC Code Coverage Report


Directory: ./
File: hash_map.c
Date: 2025-12-31 16:19:05
Exec Total Coverage
Lines: 222 222 100.0%
Functions: 16 16 100.0%
Branches: 118 122 96.7%

Line Branch Exec Source
1 /*
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
3 *
4 * Copyright 2021 Mike Becker, Olaf Wintermann All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include "cx/hash_map.h"
30
31 #include <string.h>
32 #include <assert.h>
33 #include <errno.h>
34
35 struct cx_hash_map_element_s {
36 /** A pointer to the next element in the current bucket. */
37 struct cx_hash_map_element_s *next;
38
39 /** The corresponding key. */
40 CxHashKey key;
41
42 /** The value data. */
43 char data[];
44 };
45
46 382 static void cx_hash_map_clear(struct cx_map_s *map) {
47 382 struct cx_hash_map_s *hash_map = (struct cx_hash_map_s *) map;
48
2/2
✓ Branch 0 (21→3) taken 6060 times.
✓ Branch 1 (21→22) taken 382 times.
6442 for (size_t i = 0; i < hash_map->bucket_count; i++) {
49 6060 struct cx_hash_map_element_s *elem = hash_map->buckets[i];
50
2/2
✓ Branch 0 (3→4) taken 558 times.
✓ Branch 1 (3→20) taken 5502 times.
6060 if (elem != NULL) {
51 do {
52 1053 struct cx_hash_map_element_s *next = elem->next;
53 // invoke the destructor
54
8/8
✓ Branch 0 (5→6) taken 4 times.
✓ Branch 1 (5→10) taken 1049 times.
✓ Branch 2 (6→7) taken 2 times.
✓ Branch 3 (6→8) taken 2 times.
✓ Branch 4 (10→11) taken 40 times.
✓ Branch 5 (10→15) taken 1013 times.
✓ Branch 6 (11→12) taken 36 times.
✓ Branch 7 (11→13) taken 4 times.
1053 cx_invoke_destructor(map, elem->data);
55 // free the key data
56 1053 cxFree(map->collection.allocator, (void *) elem->key.data);
57 // free the node
58 1053 cxFree(map->collection.allocator, elem);
59 // proceed
60 1053 elem = next;
61
2/2
✓ Branch 0 (17→18) taken 495 times.
✓ Branch 1 (17→19) taken 558 times.
1053 } while (elem != NULL);
62
63 // do not leave a dangling pointer
64 558 hash_map->buckets[i] = NULL;
65 }
66 }
67 382 map->collection.size = 0;
68 382 }
69
70 363 static void cx_hash_map_destructor(struct cx_map_s *map) {
71 363 struct cx_hash_map_s *hash_map = (struct cx_hash_map_s *) map;
72
73 // free the buckets
74 363 cx_hash_map_clear(map);
75 363 cxFree(map->collection.allocator, hash_map->buckets);
76
77 // free the map structure
78 363 cxFree(map->collection.allocator, map);
79 363 }
80
81 1247 static CxMapEntry cx_hash_map_put(
82 CxMap *map,
83 CxHashKey key,
84 void *value
85 ) {
86 1247 struct cx_hash_map_s *hash_map = (struct cx_hash_map_s *) map;
87 1247 const CxAllocator *allocator = map->collection.allocator;
88
89 1247 uint64_t hash = key.hash;
90
2/2
✓ Branch 0 (2→3) taken 13 times.
✓ Branch 1 (2→5) taken 1234 times.
1247 if (hash == 0) {
91 13 cx_hash_murmur(&key);
92 13 hash = key.hash;
93 }
94
95 1247 size_t slot = hash % hash_map->bucket_count;
96 1247 struct cx_hash_map_element_s *elm = hash_map->buckets[slot];
97 1247 struct cx_hash_map_element_s *prev = NULL;
98
99
4/4
✓ Branch 0 (7→8) taken 2262 times.
✓ Branch 1 (7→9) taken 759 times.
✓ Branch 2 (8→6) taken 1774 times.
✓ Branch 3 (8→9) taken 488 times.
3021 while (elm != NULL && elm->key.hash < hash) {
100 1774 prev = elm;
101 1774 elm = elm->next;
102 }
103
104
4/4
✓ Branch 0 (9→10) taken 488 times.
✓ Branch 1 (9→28) taken 759 times.
✓ Branch 2 (11→12) taken 17 times.
✓ Branch 3 (11→28) taken 471 times.
1247 if (elm != NULL && cx_hash_key_cmp(&elm->key, &key) == 0) {
105 // overwrite existing element, but call destructors first
106
8/8
✓ Branch 0 (12→13) taken 2 times.
✓ Branch 1 (12→17) taken 15 times.
✓ Branch 2 (13→14) taken 1 times.
✓ Branch 3 (13→15) taken 1 times.
✓ Branch 4 (17→18) taken 4 times.
✓ Branch 5 (17→22) taken 13 times.
✓ Branch 6 (18→19) taken 3 times.
✓ Branch 7 (18→20) taken 1 times.
17 cx_invoke_destructor(map, elm->data);
107
2/2
✓ Branch 0 (22→23) taken 5 times.
✓ Branch 1 (22→24) taken 12 times.
17 if (value == NULL) {
108 5 memset(elm->data, 0, map->collection.elem_size);
109
2/2
✓ Branch 0 (24→25) taken 6 times.
✓ Branch 1 (24→26) taken 6 times.
12 } else if (map->collection.store_pointer) {
110 6 memcpy(elm->data, &value, sizeof(void *));
111 } else {
112 6 memcpy(elm->data, value, map->collection.elem_size);
113 }
114 } else {
115 // allocate new element
116 1230 struct cx_hash_map_element_s *e = cxMalloc(
117 allocator,
118 1230 sizeof(struct cx_hash_map_element_s) + map->collection.elem_size
119 );
120 if (e == NULL) return (CxMapEntry){NULL, NULL}; // LCOV_EXCL_LINE
121
122 // write the value
123
2/2
✓ Branch 0 (32→33) taken 964 times.
✓ Branch 1 (32→34) taken 266 times.
1230 if (value == NULL) {
124 964 memset(e->data, 0, map->collection.elem_size);
125
2/2
✓ Branch 0 (34→35) taken 148 times.
✓ Branch 1 (34→36) taken 118 times.
266 } else if (map->collection.store_pointer) {
126 148 memcpy(e->data, &value, sizeof(void *));
127 } else {
128 118 memcpy(e->data, value, map->collection.elem_size);
129 }
130
131 // copy the key
132 1230 void *kd = cxMalloc(allocator, key.len);
133 if (kd == NULL) { // LCOV_EXCL_START
134 cxFree(allocator, e);
135 return (CxMapEntry){NULL, NULL};
136 } // LCOV_EXCL_STOP
137 1230 memcpy(kd, key.data, key.len);
138 1230 e->key.data = kd;
139 1230 e->key.len = key.len;
140 1230 e->key.hash = hash;
141
142 // insert the element into the linked list
143
2/2
✓ Branch 0 (41→42) taken 786 times.
✓ Branch 1 (41→43) taken 444 times.
1230 if (prev == NULL) {
144 786 hash_map->buckets[slot] = e;
145 } else {
146 444 prev->next = e;
147 }
148 1230 e->next = elm;
149 1230 elm = e;
150
151 // increase the size
152 1230 map->collection.size++;
153 }
154
155 // return the entry
156 1247 return (CxMapEntry){&elm->key, elm->data};
157 }
158
159 177 static void cx_hash_map_unlink(
160 struct cx_hash_map_s *hash_map,
161 size_t slot,
162 struct cx_hash_map_element_s *prev,
163 struct cx_hash_map_element_s *elm
164 ) {
165 // unlink
166
2/2
✓ Branch 0 (2→3) taken 122 times.
✓ Branch 1 (2→4) taken 55 times.
177 if (prev == NULL) {
167 122 hash_map->buckets[slot] = elm->next;
168 } else {
169 55 prev->next = elm->next;
170 }
171 // free element
172 177 cxFree(hash_map->base.collection.allocator, (void *) elm->key.data);
173 177 cxFree(hash_map->base.collection.allocator, elm);
174 // decrease size
175 177 hash_map->base.collection.size--;
176 177 }
177
178 /**
179 * Helper function to avoid code duplication.
180 *
181 * If @p remove is true, and @p targetbuf is @c NULL, the element
182 * will be destroyed when found.
183 *
184 * If @p remove is true, and @p targetbuf is set, the element will
185 * be copied to that buffer and no destructor function is called.
186 *
187 * If @p remove is false, @p targetbuf must not be null and
188 * either the pointer, when the map is storing pointers, is copied
189 * to the target buffer, or a pointer to the stored object will
190 * be copied to the target buffer.
191 *
192 * @param map the map
193 * @param key the key to look up
194 * @param targetbuf see description
195 * @param remove flag indicating whether the looked up entry shall be removed
196 * @return zero, if the key was found, non-zero otherwise
197 */
198 1616 static int cx_hash_map_get_remove(
199 CxMap *map,
200 CxHashKey key,
201 void *targetbuf,
202 bool remove
203 ) {
204 1616 struct cx_hash_map_s *hash_map = (struct cx_hash_map_s *) map;
205
206 1616 uint64_t hash = key.hash;
207
2/2
✓ Branch 0 (2→3) taken 17 times.
✓ Branch 1 (2→5) taken 1599 times.
1616 if (hash == 0) {
208 17 cx_hash_murmur(&key);
209 17 hash = key.hash;
210 }
211
212 1616 size_t slot = hash % hash_map->bucket_count;
213 1616 struct cx_hash_map_element_s *elm = hash_map->buckets[slot];
214 1616 struct cx_hash_map_element_s *prev = NULL;
215
4/4
✓ Branch 0 (28→29) taken 3214 times.
✓ Branch 1 (28→30) taken 525 times.
✓ Branch 2 (29→6) taken 2824 times.
✓ Branch 3 (29→30) taken 390 times.
3739 while (elm && elm->key.hash <= hash) {
216
2/2
✓ Branch 0 (7→8) taken 701 times.
✓ Branch 1 (7→27) taken 2123 times.
2824 if (cx_hash_key_cmp(&elm->key, &key) == 0) {
217
2/2
✓ Branch 0 (8→9) taken 164 times.
✓ Branch 1 (8→22) taken 537 times.
701 if (remove) {
218
2/2
✓ Branch 0 (9→10) taken 40 times.
✓ Branch 1 (9→20) taken 124 times.
164 if (targetbuf == NULL) {
219
8/8
✓ Branch 0 (10→11) taken 2 times.
✓ Branch 1 (10→15) taken 38 times.
✓ Branch 2 (11→12) taken 1 times.
✓ Branch 3 (11→13) taken 1 times.
✓ Branch 4 (15→16) taken 2 times.
✓ Branch 5 (15→21) taken 38 times.
✓ Branch 6 (16→17) taken 1 times.
✓ Branch 7 (16→18) taken 1 times.
40 cx_invoke_destructor(map, elm->data);
220 } else {
221 124 memcpy(targetbuf, elm->data, map->collection.elem_size);
222 }
223 164 cx_hash_map_unlink(hash_map, slot, prev, elm);
224 } else {
225 assert(targetbuf != NULL);
226 537 void *data = NULL;
227
2/2
✓ Branch 0 (22→23) taken 426 times.
✓ Branch 1 (22→24) taken 111 times.
537 if (map->collection.store_pointer) {
228 426 data = *(void **) elm->data;
229 } else {
230 111 data = elm->data;
231 }
232 537 memcpy(targetbuf, &data, sizeof(void *));
233 }
234 701 return 0;
235 }
236 2123 prev = elm;
237 2123 elm = prev->next;
238 }
239
240 915 return 1;
241 }
242
243 651 static void *cx_hash_map_get(
244 const CxMap *map,
245 CxHashKey key
246 ) {
247 // we can safely cast, because we know the map stays untouched
248 651 void *ptr = NULL;
249 651 int found = cx_hash_map_get_remove((CxMap *) map, key, &ptr, false);
250
2/2
✓ Branch 0 (3→4) taken 537 times.
✓ Branch 1 (3→5) taken 114 times.
651 return found == 0 ? ptr : NULL;
251 }
252
253 965 static int cx_hash_map_remove(
254 CxMap *map,
255 CxHashKey key,
256 void *targetbuf
257 ) {
258 965 return cx_hash_map_get_remove(map, key, targetbuf, true);
259 }
260
261 281 static void *cx_hash_map_iter_current_entry(const void *it) {
262 281 const CxMapIterator *iter = it;
263 // we have to cast away const, because of the signature
264 281 return (void*) &iter->entry;
265 }
266
267 109 static void *cx_hash_map_iter_current_key(const void *it) {
268 109 const CxMapIterator *iter = it;
269 109 return (void*) iter->entry.key;
270 }
271
272 108 static void *cx_hash_map_iter_current_value(const void *it) {
273 108 const CxMapIterator *iter = it;
274 108 return iter->entry.value;
275 }
276
277 586 static bool cx_hash_map_iter_valid(const void *it) {
278 586 const CxMapIterator *iter = it;
279 586 return iter->elem != NULL;
280 }
281
282 476 static void cx_hash_map_iter_next(void *it) {
283 476 CxMapIterator *iter = it;
284 476 CxMap *map = iter->map;
285 476 struct cx_hash_map_s *hmap = (struct cx_hash_map_s *) map;
286 476 struct cx_hash_map_element_s *elm = iter->elem;
287
288 // remove current element, if asked
289
2/2
✓ Branch 0 (2→3) taken 13 times.
✓ Branch 1 (2→19) taken 463 times.
476 if (iter->base.remove) {
290
291 // clear the flag
292 13 iter->base.remove = false;
293
294 // determine the next element
295 13 struct cx_hash_map_element_s *next = elm->next;
296
297 // search the previous element
298 13 struct cx_hash_map_element_s *prev = NULL;
299
2/2
✓ Branch 0 (3→4) taken 2 times.
✓ Branch 1 (3→7) taken 11 times.
13 if (hmap->buckets[iter->slot] != elm) {
300 2 prev = hmap->buckets[iter->slot];
301
2/2
✓ Branch 0 (6→5) taken 1 times.
✓ Branch 1 (6→7) taken 2 times.
3 while (prev->next != elm) {
302 1 prev = prev->next;
303 }
304 }
305
306 // destroy
307
8/8
✓ Branch 0 (7→8) taken 4 times.
✓ Branch 1 (7→12) taken 9 times.
✓ Branch 2 (8→9) taken 2 times.
✓ Branch 3 (8→10) taken 2 times.
✓ Branch 4 (12→13) taken 4 times.
✓ Branch 5 (12→17) taken 9 times.
✓ Branch 6 (13→14) taken 2 times.
✓ Branch 7 (13→15) taken 2 times.
13 cx_invoke_destructor(map, elm->data);
308
309 // unlink
310 13 cx_hash_map_unlink(hmap, iter->slot, prev, elm);
311 13 iter->elem_count--;
312
313 // advance
314 13 elm = next;
315 } else {
316 // just advance
317 463 elm = elm->next;
318 463 iter->index++;
319 }
320
321 // search the next bucket, if required
322
4/4
✓ Branch 0 (22→23) taken 1192 times.
✓ Branch 1 (22→24) taken 378 times.
✓ Branch 2 (23→21) taken 1094 times.
✓ Branch 3 (23→24) taken 98 times.
1570 while (elm == NULL && ++iter->slot < hmap->bucket_count) {
323 1094 elm = hmap->buckets[iter->slot];
324 }
325 476 iter->elem = elm;
326
327 // copy data to a location where the iterator can point to
328 // we need to do it here, because the iterator function call
329 // must not modify the iterator (the parameter is const)
330
2/2
✓ Branch 0 (24→25) taken 378 times.
✓ Branch 1 (24→28) taken 98 times.
476 if (elm != NULL) {
331 378 iter->entry.key = &elm->key;
332
2/2
✓ Branch 0 (25→26) taken 290 times.
✓ Branch 1 (25→27) taken 88 times.
378 if (map->collection.store_pointer) {
333 290 iter->entry.value = *(void **) elm->data;
334 } else {
335 88 iter->entry.value = elm->data;
336 }
337 }
338 476 }
339
340 131 static CxMapIterator cx_hash_map_iterator(
341 const CxMap *map,
342 enum cx_map_iterator_type type
343 ) {
344 CxMapIterator iter;
345
346 131 iter.map = (CxMap*) map;
347 131 iter.elem_count = map->collection.size;
348
349
3/4
✓ Branch 0 (2→3) taken 86 times.
✓ Branch 1 (2→4) taken 22 times.
✓ Branch 2 (2→5) taken 23 times.
✗ Branch 3 (2→6) not taken.
131 switch (type) {
350 86 case CX_MAP_ITERATOR_PAIRS:
351 86 iter.elem_size = sizeof(CxMapEntry);
352 86 iter.base.current = cx_hash_map_iter_current_entry;
353 86 break;
354 22 case CX_MAP_ITERATOR_KEYS:
355 22 iter.elem_size = sizeof(CxHashKey);
356 22 iter.base.current = cx_hash_map_iter_current_key;
357 22 break;
358 23 case CX_MAP_ITERATOR_VALUES:
359 23 iter.elem_size = map->collection.elem_size;
360 23 iter.base.current = cx_hash_map_iter_current_value;
361 23 break;
362 131 default:
363 assert(false); // LCOV_EXCL_LINE
364 }
365
366 131 iter.base.valid = cx_hash_map_iter_valid;
367 131 iter.base.next = cx_hash_map_iter_next;
368 131 iter.base.remove = false;
369 131 iter.base.allow_remove = true;
370
371 131 iter.slot = 0;
372 131 iter.index = 0;
373
374
2/2
✓ Branch 0 (6→7) taken 120 times.
✓ Branch 1 (6→13) taken 11 times.
131 if (map->collection.size > 0) {
375 120 struct cx_hash_map_s *hash_map = (struct cx_hash_map_s *) map;
376 120 struct cx_hash_map_element_s *elm = hash_map->buckets[0];
377
2/2
✓ Branch 0 (9→8) taken 92 times.
✓ Branch 1 (9→10) taken 120 times.
212 while (elm == NULL) {
378 92 elm = hash_map->buckets[++iter.slot];
379 }
380 120 iter.elem = elm;
381 120 iter.entry.key = &elm->key;
382
2/2
✓ Branch 0 (10→11) taken 79 times.
✓ Branch 1 (10→12) taken 41 times.
120 if (map->collection.store_pointer) {
383 79 iter.entry.value = *(void **) elm->data;
384 } else {
385 41 iter.entry.value = elm->data;
386 }
387 } else {
388 11 iter.elem = NULL;
389 }
390
391 131 return iter;
392 }
393
394 static cx_map_class cx_hash_map_class = {
395 cx_hash_map_destructor,
396 cx_hash_map_clear,
397 cx_hash_map_put,
398 cx_hash_map_get,
399 cx_hash_map_remove,
400 cx_hash_map_iterator,
401 };
402
403 20 static int cx_map_cmpfunc2_safe_memcmp(const void *a, const void *b, void *c) {
404 // it is not safe to store a pointer to the size in the list
405 // because the entire list structure might get reallocated
406 20 size_t elem_size = (size_t)(uintptr_t)c;
407 20 return memcmp(a, b, elem_size);
408 }
409
410 363 CxMap *cxHashMapCreate(
411 const CxAllocator *allocator,
412 size_t itemsize,
413 size_t buckets
414 ) {
415
2/2
✓ Branch 0 (2→3) taken 70 times.
✓ Branch 1 (2→4) taken 293 times.
363 if (allocator == NULL) {
416 70 allocator = cxDefaultAllocator;
417 }
418
419
2/2
✓ Branch 0 (4→5) taken 356 times.
✓ Branch 1 (4→6) taken 7 times.
363 if (buckets == 0) {
420 // implementation defined default
421 356 buckets = 16;
422 }
423
424 363 struct cx_hash_map_s *map = cxZalloc(allocator, sizeof(struct cx_hash_map_s));
425
1/2
✗ Branch 0 (7→8) not taken.
✓ Branch 1 (7→9) taken 363 times.
363 if (map == NULL) return NULL;
426
427 // initialize hash map members
428 363 map->bucket_count = buckets;
429 363 map->buckets = cxCalloc(allocator, buckets,
430 sizeof(struct cx_hash_map_element_s *));
431 if (map->buckets == NULL) { // LCOV_EXCL_START
432 cxFree(allocator, map);
433 return NULL;
434 } // LCOV_EXCL_STOP
435
436 // initialize base members
437 363 map->base.cl = &cx_hash_map_class;
438 363 map->base.collection.allocator = allocator;
439
440
2/2
✓ Branch 0 (13→14) taken 45 times.
✓ Branch 1 (13→15) taken 318 times.
363 if (itemsize > 0) {
441 45 map->base.collection.elem_size = itemsize;
442 45 map->base.collection.advanced_cmp = cx_map_cmpfunc2_safe_memcmp;
443 45 map->base.collection.cmp_data = (void*)(uintptr_t)itemsize;
444 } else {
445 318 map->base.collection.elem_size = sizeof(void *);
446 318 map->base.collection.store_pointer = true;
447 318 map->base.collection.simple_cmp = cx_cmp_ptr;
448 }
449
450 363 return (CxMap *) map;
451 }
452
453 6 int cxMapRehash(CxMap *map) {
454 6 struct cx_hash_map_s *hash_map = (struct cx_hash_map_s *) map;
455
2/2
✓ Branch 0 (2→3) taken 1 times.
✓ Branch 1 (2→23) taken 5 times.
6 if (map->collection.size > ((hash_map->bucket_count * 3) >> 2)) {
456
457 1 size_t new_bucket_count = (map->collection.size * 5) >> 1;
458
1/2
✗ Branch 0 (3→4) not taken.
✓ Branch 1 (3→5) taken 1 times.
1 if (new_bucket_count < hash_map->bucket_count) {
459 // LCOV_EXCL_START
460 errno = EOVERFLOW;
461 return 1;
462 } // LCOV_EXCL_STOP
463 1 struct cx_hash_map_element_s **new_buckets = cxCalloc(
464 map->collection.allocator,
465 new_bucket_count, sizeof(struct cx_hash_map_element_s *)
466 );
467
468 if (new_buckets == NULL) return 1; // LCOV_EXCL_LINE
469
470 // iterate through the elements and assign them to their new slots
471
2/2
✓ Branch 0 (20→9) taken 7 times.
✓ Branch 1 (20→21) taken 1 times.
8 for (size_t slot = 0; slot < hash_map->bucket_count; slot++) {
472 7 struct cx_hash_map_element_s *elm = hash_map->buckets[slot];
473
2/2
✓ Branch 0 (18→10) taken 10 times.
✓ Branch 1 (18→19) taken 7 times.
17 while (elm != NULL) {
474 10 struct cx_hash_map_element_s *next = elm->next;
475 10 size_t new_slot = elm->key.hash % new_bucket_count;
476
477 // find position where to insert
478 10 struct cx_hash_map_element_s *bucket_next = new_buckets[new_slot];
479 10 struct cx_hash_map_element_s *bucket_prev = NULL;
480
2/2
✓ Branch 0 (12→13) taken 1 times.
✓ Branch 1 (12→14) taken 10 times.
11 while (bucket_next != NULL &&
481
1/2
✓ Branch 0 (13→11) taken 1 times.
✗ Branch 1 (13→14) not taken.
1 bucket_next->key.hash < elm->key.hash) {
482 1 bucket_prev = bucket_next;
483 1 bucket_next = bucket_next->next;
484 }
485
486 // insert
487
2/2
✓ Branch 0 (14→15) taken 9 times.
✓ Branch 1 (14→16) taken 1 times.
10 if (bucket_prev == NULL) {
488 9 elm->next = new_buckets[new_slot];
489 9 new_buckets[new_slot] = elm;
490 } else {
491 1 bucket_prev->next = elm;
492 1 elm->next = bucket_next;
493 }
494
495 // advance
496 10 elm = next;
497 }
498 }
499
500 // assign result to the map
501 1 hash_map->bucket_count = new_bucket_count;
502 1 cxFree(map->collection.allocator, hash_map->buckets);
503 1 hash_map->buckets = new_buckets;
504 }
505 6 return 0;
506 }
507