GCC Code Coverage Report


Directory: ./
File: array_list.c
Date: 2025-12-31 16:19:05
Exec Total Coverage
Lines: 460 460 100.0%
Functions: 45 45 100.0%
Branches: 220 240 91.7%

Line Branch Exec Source
1 /*
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
3 *
4 * Copyright 2021 Mike Becker, Olaf Wintermann All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #ifdef WITH_MEMRCHR
30 #define _GNU_SOURCE
31 #endif
32
33 #include "cx/array_list.h"
34 #include "cx/compare.h"
35 #include <assert.h>
36 #include <string.h>
37 #include <errno.h>
38
39 // LOW LEVEL ARRAY LIST FUNCTIONS
40
41 /**
42 * Intelligently calculates a new capacity, reserving some more
43 * elements than required to prevent too many allocations.
44 *
45 * @param current_capacity the current capacity of the array
46 * @param needed_capacity the required capacity of the array
47 * @return the new capacity
48 */
49 407 static size_t cx_array_grow_capacity(
50 size_t current_capacity,
51 size_t needed_capacity
52 ) {
53
2/2
✓ Branch 0 (2→3) taken 55 times.
✓ Branch 1 (2→4) taken 352 times.
407 if (current_capacity >= needed_capacity) {
54 55 return current_capacity;
55 }
56 352 size_t cap = needed_capacity;
57 size_t alignment;
58
2/2
✓ Branch 0 (4→5) taken 299 times.
✓ Branch 1 (4→6) taken 53 times.
352 if (cap < 128) alignment = 16;
59
2/2
✓ Branch 0 (6→7) taken 46 times.
✓ Branch 1 (6→8) taken 7 times.
53 else if (cap < 1024) alignment = 64;
60
2/2
✓ Branch 0 (8→9) taken 4 times.
✓ Branch 1 (8→10) taken 3 times.
7 else if (cap < 8192) alignment = 512;
61 3 else alignment = 1024;
62 352 return cap - (cap % alignment) + alignment;
63 }
64
65 248 int cx_array_init_(const CxAllocator *allocator, CxArray *array, size_t elem_size, size_t capacity) {
66 248 memset(array, 0, sizeof(CxArray));
67 248 return cx_array_reserve_(allocator, array, elem_size, capacity);
68 }
69
70 189 void cx_array_init_fixed_(CxArray *array, const void *data, size_t capacity, size_t size) {
71 189 array->data = (void*) data;
72 189 array->capacity = capacity;
73 189 array->size = size;
74 189 }
75
76 289 int cx_array_reserve_(const CxAllocator *allocator, CxArray *array, size_t elem_size, size_t capacity) {
77
1/2
✗ Branch 0 (3→4) not taken.
✓ Branch 1 (3→5) taken 289 times.
289 if (cxReallocateArray(allocator, &array->data, capacity, elem_size)) {
78 return -1; // LCOV_EXCL_LINE
79 }
80 289 array->capacity = capacity;
81
2/2
✓ Branch 0 (5→6) taken 1 times.
✓ Branch 1 (5→7) taken 288 times.
289 if (array->size > capacity) {
82 1 array->size = capacity;
83 }
84 289 return 0;
85 }
86
87 7 int cx_array_copy_to_new_(const CxAllocator *allocator, CxArray *array, size_t elem_size, size_t capacity) {
88 CxArray heap_array;
89
1/2
✗ Branch 0 (3→4) not taken.
✓ Branch 1 (3→5) taken 7 times.
7 if (cx_array_init_(allocator, &heap_array, elem_size, capacity)) {
90 return -1; // LCOV_EXCL_LINE
91 }
92 7 heap_array.size = array->size;
93 7 memcpy(heap_array.data, array->data, elem_size * array->size);
94 7 *array = heap_array;
95 7 return 0;
96 }
97
98 7584 int cx_array_insert_(const CxAllocator *allocator, CxArray *array,
99 size_t elem_size, size_t index, const void *other, size_t n) {
100 // out of bounds and special case check
101
2/2
✓ Branch 0 (2→3) taken 4 times.
✓ Branch 1 (2→4) taken 7580 times.
7584 if (index > array->size) return -1;
102
1/2
✗ Branch 0 (4→5) not taken.
✓ Branch 1 (4→6) taken 7580 times.
7580 if (n == 0) return 0;
103
104 // calculate required capacity
105 7580 size_t req_capacity = array->size + n;
106
2/2
✓ Branch 0 (6→7) taken 1 times.
✓ Branch 1 (6→8) taken 7579 times.
7580 if (req_capacity <= array->size) {
107 1 errno = EOVERFLOW;
108 1 return -1;
109 }
110
111 // guarantee enough capacity
112
2/2
✓ Branch 0 (8→9) taken 337 times.
✓ Branch 1 (8→14) taken 7242 times.
7579 if (array->capacity < req_capacity) {
113 337 const size_t new_capacity = cx_array_grow_capacity(array->capacity,req_capacity);
114
2/2
✓ Branch 0 (11→12) taken 1 times.
✓ Branch 1 (11→13) taken 336 times.
337 if (cxReallocateArray(allocator, &array->data, new_capacity, elem_size)) {
115 1 return -1;
116 }
117 336 array->capacity = new_capacity;
118 }
119
120 // determine insert position
121 7578 char *dst = array->data;
122 7578 dst += index * elem_size;
123
124 // do we need to move some elements?
125 7578 size_t elems_to_move = array->size - index;
126
2/2
✓ Branch 0 (14→15) taken 172 times.
✓ Branch 1 (14→16) taken 7406 times.
7578 if (elems_to_move > 0) {
127 172 char *target = dst + n * elem_size;
128 172 memmove(target, dst, elems_to_move * elem_size);
129 }
130
131 // place the new elements, if any
132 // otherwise, this function just reserved the memory (a.k.a emplace)
133
2/2
✓ Branch 0 (16→17) taken 7509 times.
✓ Branch 1 (16→18) taken 69 times.
7578 if (other != NULL) {
134 7509 memcpy(dst, other, n * elem_size);
135 }
136 7578 array->size += n;
137
138 7578 return 0;
139 }
140
141 70 int cx_array_insert_sorted_c_(
142 const CxAllocator *allocator,
143 CxArray *array,
144 size_t elem_size,
145 const void *sorted_data,
146 size_t n,
147 cx_compare_func2 cmp_func,
148 void *context,
149 bool allow_duplicates
150 ) {
151 // assert pointers
152 assert(allocator != NULL);
153 assert(array != NULL);
154 assert(cmp_func != NULL);
155 assert(sorted_data != NULL);
156
157 // corner case
158
1/2
✗ Branch 0 (2→3) not taken.
✓ Branch 1 (2→4) taken 70 times.
70 if (n == 0) return 0;
159
160 // overflow check
161 // LCOV_EXCL_START
162 if (n > SIZE_MAX - array->size) {
163 errno = EOVERFLOW;
164 return 1;
165 }
166 // LCOV_EXCL_STOP
167
168 // store some counts
169 70 const size_t old_size = array->size;
170 70 const size_t old_capacity = array->capacity;
171 // the necessary capacity is the worst case assumption, including duplicates
172 70 const size_t needed_capacity = cx_array_grow_capacity(old_capacity, old_size + n);
173
174 // if we need more than we have, try a reallocation
175
2/2
✓ Branch 0 (7→8) taken 15 times.
✓ Branch 1 (7→12) taken 55 times.
70 if (needed_capacity > old_capacity) {
176
1/2
✗ Branch 0 (9→10) not taken.
✓ Branch 1 (9→11) taken 15 times.
15 if (cxReallocateArray(allocator, &array->data, needed_capacity, elem_size)) {
177 return -1; // LCOV_EXCL_LINE
178 }
179 15 array->capacity = needed_capacity;
180 }
181
182 // now we have guaranteed that we can insert everything
183 70 size_t new_size = old_size + n;
184 70 array->size = new_size;
185
186 // declare the source and destination indices/pointers
187 70 size_t si = 0, di = 0;
188 70 const char *src = sorted_data;
189 70 char *dest = array->data;
190
191 // find the first insertion point
192 70 di = cx_array_binary_search_sup_c(dest, old_size, elem_size, src, cmp_func, context);
193 70 dest += di * elem_size;
194
195 // move the remaining elements in the array completely to the right
196 // we will call it the "buffer" for parked elements
197 70 size_t buf_size = old_size - di;
198 70 size_t bi = new_size - buf_size;
199 70 char *bptr = ((char *) array->data) + bi * elem_size;
200 70 memmove(bptr, dest, buf_size * elem_size);
201
202 // while there are both source and buffered elements left,
203 // copy them interleaving
204
3/4
✓ Branch 0 (39→40) taken 126 times.
✗ Branch 1 (39→41) not taken.
✓ Branch 2 (40→14) taken 99 times.
✓ Branch 3 (40→41) taken 27 times.
126 while (si < n && bi < new_size) {
205 // determine how many source elements can be inserted.
206 // the first element that shall not be inserted is the smallest element
207 // that is strictly larger than the first buffered element
208 // (located at the index of the infimum plus one).
209 // the infimum is guaranteed to exist:
210 // - if all src elements are larger,
211 // there is no buffer, and this loop is skipped
212 // - if any src element is smaller or equal, the infimum exists
213 // - when all src elements that are smaller are copied, the second part
214 // of this loop body will copy the remaining buffer (emptying it)
215 // Therefore, the buffer can never contain an element that is smaller
216 // than any element in the source and the infimum exists.
217 size_t copy_len, bytes_copied;
218 99 copy_len = cx_array_binary_search_inf_c(
219 src, n - si, elem_size, bptr, cmp_func, context
220 );
221 99 copy_len++;
222
223 // copy the source elements
224
1/2
✓ Branch 0 (15→16) taken 99 times.
✗ Branch 1 (15→35) not taken.
99 if (copy_len > 0) {
225
2/2
✓ Branch 0 (16→17) taken 46 times.
✓ Branch 1 (16→18) taken 53 times.
99 if (allow_duplicates) {
226 // we can copy the entire chunk
227 46 bytes_copied = copy_len * elem_size;
228 46 memcpy(dest, src, bytes_copied);
229 46 dest += bytes_copied;
230 46 src += bytes_copied;
231 46 si += copy_len;
232 46 di += copy_len;
233 } else {
234 // first, check the end of the source chunk
235 // for being a duplicate of the bptr
236 53 const char *end_of_src = src + (copy_len - 1) * elem_size;
237 53 size_t skip_len = 0;
238
4/4
✓ Branch 0 (20→21) taken 60 times.
✓ Branch 1 (20→23) taken 13 times.
✓ Branch 2 (22→19) taken 20 times.
✓ Branch 3 (22→23) taken 40 times.
73 while (copy_len > 0 && cmp_func(bptr, end_of_src, context) == 0) {
239 20 end_of_src -= elem_size;
240 20 skip_len++;
241 20 copy_len--;
242 }
243
2/2
✓ Branch 0 (23→24) taken 49 times.
✓ Branch 1 (23→25) taken 4 times.
53 char *last = dest == array->data ? NULL : dest - elem_size;
244 // then iterate through the source chunk
245 // and skip all duplicates with the last element in the array
246 53 size_t more_skipped = 0;
247
2/2
✓ Branch 0 (33→27) taken 73 times.
✓ Branch 1 (33→34) taken 53 times.
126 for (unsigned j = 0; j < copy_len; j++) {
248
4/4
✓ Branch 0 (27→28) taken 69 times.
✓ Branch 1 (27→31) taken 4 times.
✓ Branch 2 (29→30) taken 13 times.
✓ Branch 3 (29→31) taken 56 times.
73 if (last != NULL && cmp_func(last, src, context) == 0) {
249 // duplicate - skip
250 13 src += elem_size;
251 13 si++;
252 13 more_skipped++;
253 } else {
254 60 memcpy(dest, src, elem_size);
255 60 src += elem_size;
256 60 last = dest;
257 60 dest += elem_size;
258 60 si++;
259 60 di++;
260 }
261 }
262 // skip the previously identified elements as well
263 53 src += skip_len * elem_size;
264 53 si += skip_len;
265 53 skip_len += more_skipped;
266 // reduce the actual size by the number of skipped elements
267 53 array->size -= skip_len;
268 }
269 }
270
271 // when all source elements are in place, we are done
272
2/2
✓ Branch 0 (35→36) taken 43 times.
✓ Branch 1 (35→37) taken 56 times.
99 if (si >= n) break;
273
274 // determine how many buffered elements need to be restored
275 56 copy_len = cx_array_binary_search_sup_c(
276 bptr,
277 new_size - bi,
278 elem_size,
279 src,
280 cmp_func,
281 context
282 );
283
284 // restore the buffered elements
285 56 bytes_copied = copy_len * elem_size;
286 56 memmove(dest, bptr, bytes_copied);
287 56 dest += bytes_copied;
288 56 bptr += bytes_copied;
289 56 di += copy_len;
290 56 bi += copy_len;
291 }
292
293 // still source elements left?
294
2/2
✓ Branch 0 (41→42) taken 27 times.
✓ Branch 1 (41→58) taken 43 times.
70 if (si < n) {
295
2/2
✓ Branch 0 (42→43) taken 15 times.
✓ Branch 1 (42→44) taken 12 times.
27 if (allow_duplicates) {
296 // duplicates allowed or nothing inserted yet: simply copy everything
297 15 memcpy(dest, src, elem_size * (n - si));
298 } else {
299 // we must check the remaining source elements one by one
300 // to skip the duplicates.
301 // Note that no source element can equal the last element in the
302 // destination, because that would have created an insertion point
303 // and a buffer, s.t. the above loop already handled the duplicates
304
2/2
✓ Branch 0 (57→45) taken 18 times.
✓ Branch 1 (57→58) taken 12 times.
30 while (si < n) {
305 // find a chain of elements that can be copied
306 18 size_t copy_len = 1, skip_len = 0;
307 {
308 18 const char *left_src = src;
309
2/2
✓ Branch 0 (55→46) taken 27 times.
✓ Branch 1 (55→56) taken 12 times.
39 while (si + copy_len + skip_len < n) {
310 27 const char *right_src = left_src + elem_size;
311 27 int d = cmp_func(left_src, right_src, context);
312
2/2
✓ Branch 0 (47→48) taken 20 times.
✓ Branch 1 (47→51) taken 7 times.
27 if (d < 0) {
313
2/2
✓ Branch 0 (48→49) taken 6 times.
✓ Branch 1 (48→50) taken 14 times.
20 if (skip_len > 0) {
314 // new larger element found;
315 // handle it in the next cycle
316 6 break;
317 }
318 14 left_src += elem_size;
319 14 copy_len++;
320
1/2
✓ Branch 0 (51→52) taken 7 times.
✗ Branch 1 (51→53) not taken.
7 } else if (d == 0) {
321 7 left_src += elem_size;
322 7 skip_len++;
323 } else {
324 // should be unreachable because the requirement is
325 // that the source array is sorted
326 break; // LCOV_EXCL_LINE
327 }
328 }
329 }
330 18 size_t bytes_copied = copy_len * elem_size;
331 18 memcpy(dest, src, bytes_copied);
332 18 dest += bytes_copied;
333 18 src += bytes_copied + skip_len * elem_size;
334 18 si += copy_len + skip_len;
335 18 di += copy_len;
336 18 array->size -= skip_len;
337 }
338 }
339 }
340
341 // buffered elements need to be moved when we skipped duplicates
342 70 size_t total_skipped = new_size - array->size;
343
4/4
✓ Branch 0 (58→59) taken 43 times.
✓ Branch 1 (58→61) taken 27 times.
✓ Branch 2 (59→60) taken 10 times.
✓ Branch 3 (59→61) taken 33 times.
70 if (bi < new_size && total_skipped > 0) {
344 // move the remaining buffer to the end of the array
345 10 memmove(dest, bptr, elem_size * (new_size - bi));
346 }
347
348 70 return 0;
349 }
350
351 23 int cx_array_insert_sorted_(
352 const CxAllocator *allocator,
353 CxArray *array,
354 size_t elem_size,
355 const void *sorted_data,
356 size_t n,
357 cx_compare_func cmp_func,
358 bool allow_duplicates
359 ) {
360 23 cx_compare_func_wrapper wrapper = {cmp_func};
361 23 return cx_array_insert_sorted_c_(allocator, array, elem_size, sorted_data,
362 n, cx_cmp_wrap, &wrapper, allow_duplicates);
363 }
364
365 #ifndef WITH_QSORT_R
366 static cx_thread_local cx_compare_func2 cx_array_fn_for_qsort;
367 static cx_thread_local void *cx_array_context_for_qsort;
368 static int cx_array_qsort_wrapper(const void *l, const void *r) {
369 return cx_array_fn_for_qsort(l, r, cx_array_context_for_qsort);
370 }
371 #endif
372
373 #if defined(WITH_QSORT_R) && defined(__APPLE__)
374 // macOS uses a different comparefunc signature for qsort_r
375 typedef struct QsortCmpFuncWrapper {
376 cx_compare_func2 fn;
377 void *context;
378 } QsortCmpFuncWrapper;
379
380 static int sort_comparefunc(void *context, const void *left, const void *right){
381 QsortCmpFuncWrapper *w = context;
382 return w->fn(left, right, w->context);
383 }
384 #endif
385
386 9 void cx_array_qsort_c(void *array, size_t nmemb, size_t size,
387 cx_compare_func2 fn, void *context) {
388 #ifdef WITH_QSORT_R
389 #ifndef __APPLE__
390 9 qsort_r(array, nmemb, size, fn, context);
391 #else
392 QsortCmpFuncWrapper wrapper;
393 wrapper.fn = fn;
394 wrapper.context = context;
395 qsort_r(array, nmemb, size, &wrapper, sort_comparefunc);
396 #endif
397 #else
398 cx_array_fn_for_qsort = fn;
399 cx_array_context_for_qsort = context;
400 qsort(array, nmemb, size, cx_array_qsort_wrapper);
401 #endif
402 9 }
403
404 1 void cx_array_sort_(CxArray *array, size_t elem_size,
405 cx_compare_func fn) {
406 1 qsort(array->data, array->size, elem_size, fn);
407 1 }
408
409 1 void cx_array_sort_c_(CxArray *array, size_t elem_size,
410 cx_compare_func2 fn, void *context) {
411 1 cx_array_qsort_c(array->data, array->size, elem_size, fn, context);
412 1 }
413
414 1 CxIterator cx_array_iterator_(CxArray *array, size_t elem_size) {
415 1 return cxIterator(array->data, elem_size, array->size);
416 }
417
418 82 CxIterator cx_array_iterator_ptr_(CxArray *array) {
419 82 return cxIteratorPtr(array->data, array->size);
420 }
421
422 17 void cx_array_remove_(CxArray *array, size_t elem_size, size_t index, size_t n, bool fast) {
423
1/2
✗ Branch 0 (2→3) not taken.
✓ Branch 1 (2→4) taken 17 times.
17 if (n == 0) return;
424
2/2
✓ Branch 0 (4→5) taken 4 times.
✓ Branch 1 (4→6) taken 13 times.
17 if (index >= array->size) return;
425
2/2
✓ Branch 0 (6→7) taken 4 times.
✓ Branch 1 (6→8) taken 9 times.
13 if (index + n >= array->size) {
426 // only tail elements are removed
427 4 array->size = index;
428 4 return;
429 }
430 9 array->size -= n;
431 9 size_t remaining = array->size - index;
432 9 char *dest = ((char*)array->data) + index * elem_size;
433
2/2
✓ Branch 0 (8→9) taken 6 times.
✓ Branch 1 (8→20) taken 3 times.
9 if (fast) {
434 6 char *src = dest + remaining * elem_size;
435
3/4
✓ Branch 0 (9→10) taken 5 times.
✓ Branch 1 (9→19) taken 1 times.
✓ Branch 2 (10→11) taken 5 times.
✗ Branch 3 (10→19) not taken.
6 if (n == 1 && elem_size <= CX_WORDSIZE/8) {
436 // try to optimize int-sized values
437 // (from likely to unlikely)
438
2/2
✓ Branch 0 (11→12) taken 1 times.
✓ Branch 1 (11→13) taken 4 times.
5 if (elem_size == sizeof(int32_t)) {
439 1 *(int32_t*)dest = *(int32_t*)src;
440 1 return;
441 }
442 #if CX_WORDSIZE == 64
443
2/2
✓ Branch 0 (13→14) taken 2 times.
✓ Branch 1 (13→15) taken 2 times.
4 if (elem_size == sizeof(int64_t)) {
444 2 *(int64_t*)dest = *(int64_t*)src;
445 2 return;
446 }
447 #endif
448
2/2
✓ Branch 0 (15→16) taken 1 times.
✓ Branch 1 (15→17) taken 1 times.
2 if (elem_size == sizeof(int8_t)) {
449 1 *(int8_t*)dest = *(int8_t*)src;
450 1 return;
451 }
452
1/2
✓ Branch 0 (17→18) taken 1 times.
✗ Branch 1 (17→19) not taken.
1 if (elem_size == sizeof(int16_t)) {
453 1 *(int16_t*)dest = *(int16_t*)src;
454 1 return;
455 }
456 // note we cannot optimize the last branch, because
457 // the elem_size could be crazily misaligned
458 }
459 1 memcpy(dest, src, n * elem_size);
460 } else {
461 3 char *src = dest + n * elem_size;
462 3 memmove(dest, src, remaining * elem_size);
463 }
464 }
465
466 279 void cx_array_free_(const CxAllocator *allocator, CxArray *array) {
467 279 cxFree(allocator, array->data);
468 279 array->data = NULL;
469 279 array->size = array->capacity = 0;
470 279 }
471
472
473 // implementation that finds ANY index
474 302 static size_t cx_array_binary_search_inf_impl(
475 const void *arr,
476 size_t size,
477 size_t elem_size,
478 const void *elem,
479 cx_compare_func2 cmp_func,
480 void *context
481 ) {
482 // special case: empty array
483
2/2
✓ Branch 0 (2→3) taken 15 times.
✓ Branch 1 (2→4) taken 287 times.
302 if (size == 0) return 0;
484
485 // declare a variable that will contain the compare results
486 int result;
487
488 // cast the array pointer to something we can use offsets with
489 287 const char *array = arr;
490
491 // check the first array element
492 287 result = cmp_func(elem, array, context);
493
2/2
✓ Branch 0 (5→6) taken 17 times.
✓ Branch 1 (5→7) taken 270 times.
287 if (result < 0) {
494 17 return size;
495
2/2
✓ Branch 0 (7→8) taken 28 times.
✓ Branch 1 (7→9) taken 242 times.
270 } else if (result == 0) {
496 28 return 0;
497 }
498
499 // special case: there is only one element and that is smaller
500
2/2
✓ Branch 0 (9→10) taken 35 times.
✓ Branch 1 (9→11) taken 207 times.
242 if (size == 1) return 0;
501
502 // check the last array element
503 207 result = cmp_func(elem, array + elem_size * (size - 1), context);
504
2/2
✓ Branch 0 (12→13) taken 26 times.
✓ Branch 1 (12→14) taken 181 times.
207 if (result >= 0) {
505 26 return size - 1;
506 }
507
508 // the element is now guaranteed to be somewhere in the list
509 // so start the binary search
510 181 size_t left_index = 1;
511 181 size_t right_index = size - 1;
512 181 size_t pivot_index = 0;
513
514
2/2
✓ Branch 0 (22→15) taken 507 times.
✓ Branch 1 (22→23) taken 129 times.
636 while (left_index <= right_index) {
515 507 pivot_index = left_index + (right_index - left_index) / 2;
516 507 const char *arr_elem = array + pivot_index * elem_size;
517 507 result = cmp_func(elem, arr_elem, context);
518
2/2
✓ Branch 0 (16→17) taken 52 times.
✓ Branch 1 (16→18) taken 455 times.
507 if (result == 0) {
519 // found it!
520 52 return pivot_index;
521
2/2
✓ Branch 0 (18→19) taken 252 times.
✓ Branch 1 (18→20) taken 203 times.
455 } else if (result < 0) {
522 // element is smaller than pivot, continue search left
523 252 right_index = pivot_index - 1;
524 } else {
525 // element is larger than pivot, continue search right
526 203 left_index = pivot_index + 1;
527 }
528 }
529
530 // report the largest upper bound
531
2/2
✓ Branch 0 (23→24) taken 88 times.
✓ Branch 1 (23→25) taken 41 times.
129 return result < 0 ? (pivot_index - 1) : pivot_index;
532 }
533
534 160 size_t cx_array_binary_search_inf_c(
535 const void *arr,
536 size_t size,
537 size_t elem_size,
538 const void *elem,
539 cx_compare_func2 cmp_func,
540 void *context
541 ) {
542 160 size_t index = cx_array_binary_search_inf_impl(
543 arr, size, elem_size, elem, cmp_func, context);
544 // in case of equality, report the largest index
545 160 const char *e = ((const char *) arr) + (index + 1) * elem_size;
546
4/4
✓ Branch 0 (5→6) taken 105 times.
✓ Branch 1 (5→8) taken 60 times.
✓ Branch 2 (7→4) taken 5 times.
✓ Branch 3 (7→8) taken 100 times.
165 while (index + 1 < size && cmp_func(e, elem, context) == 0) {
547 5 e += elem_size;
548 5 index++;
549 }
550 160 return index;
551 }
552
553 46 size_t cx_array_binary_search_c(
554 const void *arr,
555 size_t size,
556 size_t elem_size,
557 const void *elem,
558 cx_compare_func2 cmp_func,
559 void *context
560 ) {
561 46 size_t index = cx_array_binary_search_inf_c(
562 arr, size, elem_size, elem, cmp_func, context
563 );
564
4/4
✓ Branch 0 (3→4) taken 40 times.
✓ Branch 1 (3→7) taken 6 times.
✓ Branch 2 (5→6) taken 31 times.
✓ Branch 3 (5→7) taken 9 times.
46 if (index < size && cmp_func(((const char *) arr) + index * elem_size,
565 elem, context) == 0) {
566 31 return index;
567 } else {
568 15 return size;
569 }
570 }
571
572 142 size_t cx_array_binary_search_sup_c(
573 const void *arr,
574 size_t size,
575 size_t elem_size,
576 const void *elem,
577 cx_compare_func2 cmp_func,
578 void *context
579 ) {
580 142 size_t index = cx_array_binary_search_inf_impl(
581 arr, size, elem_size, elem, cmp_func, context
582 );
583 142 const char *e = ((const char *) arr) + index * elem_size;
584
2/2
✓ Branch 0 (3→4) taken 23 times.
✓ Branch 1 (3→5) taken 119 times.
142 if (index == size) {
585 // no infimum means the first element is supremum
586 23 return 0;
587
2/2
✓ Branch 0 (6→7) taken 27 times.
✓ Branch 1 (6→13) taken 92 times.
119 } else if (cmp_func(e, elem, context) == 0) {
588 // found an equal element, search the smallest index
589 27 e -= elem_size; // e now contains the element at index-1
590
4/4
✓ Branch 0 (9→10) taken 27 times.
✓ Branch 1 (9→12) taken 1 times.
✓ Branch 2 (11→8) taken 1 times.
✓ Branch 3 (11→12) taken 26 times.
28 while (index > 0 && cmp_func(e, elem, context) == 0) {
591 1 e -= elem_size;
592 1 index--;
593 }
594 27 return index;
595 } else {
596 // we already have the largest index of the infimum (by design)
597 // the next element is the supremum (or there is no supremum)
598 92 return index + 1;
599 }
600 }
601
602 15 size_t cx_array_binary_search_inf(
603 const void *arr,
604 size_t size,
605 size_t elem_size,
606 const void *elem,
607 cx_compare_func cmp_func
608 ) {
609 15 cx_compare_func_wrapper wrapper = {cmp_func};
610 15 return cx_array_binary_search_inf_c(arr, size, elem_size, elem, cx_cmp_wrap, &wrapper);
611 }
612
613 40 size_t cx_array_binary_search(
614 const void *arr,
615 size_t size,
616 size_t elem_size,
617 const void *elem,
618 cx_compare_func cmp_func
619 ) {
620 40 cx_compare_func_wrapper wrapper = {cmp_func};
621 40 return cx_array_binary_search_c(arr, size, elem_size, elem, cx_cmp_wrap, &wrapper);
622 }
623
624 16 size_t cx_array_binary_search_sup(
625 const void *arr,
626 size_t size,
627 size_t elem_size,
628 const void *elem,
629 cx_compare_func cmp_func
630 ) {
631 16 cx_compare_func_wrapper wrapper = {cmp_func};
632 16 return cx_array_binary_search_sup_c(arr, size, elem_size, elem, cx_cmp_wrap, &wrapper);
633 }
634
635 #ifndef CX_ARRAY_SWAP_SBO_SIZE
636 #define CX_ARRAY_SWAP_SBO_SIZE 128
637 #endif
638 const unsigned cx_array_swap_sbo_size = CX_ARRAY_SWAP_SBO_SIZE;
639
640 67 void cx_array_swap(
641 void *arr,
642 size_t elem_size,
643 size_t idx1,
644 size_t idx2
645 ) {
646 assert(arr != NULL);
647
648 // short circuit
649
2/2
✓ Branch 0 (2→3) taken 2 times.
✓ Branch 1 (2→4) taken 65 times.
67 if (idx1 == idx2) return;
650
651 char sbo_mem[CX_ARRAY_SWAP_SBO_SIZE];
652 void *tmp;
653
654 // decide if we can use the local buffer
655
2/2
✓ Branch 0 (4→5) taken 1 times.
✓ Branch 1 (4→8) taken 64 times.
65 if (elem_size > CX_ARRAY_SWAP_SBO_SIZE) {
656 1 tmp = cxMallocDefault(elem_size);
657 // we don't want to enforce error handling
658
1/2
✗ Branch 0 (6→7) not taken.
✓ Branch 1 (6→9) taken 1 times.
1 if (tmp == NULL) abort();
659 } else {
660 64 tmp = sbo_mem;
661 }
662
663 // calculate memory locations
664 65 char *left = arr, *right = arr;
665 65 left += idx1 * elem_size;
666 65 right += idx2 * elem_size;
667
668 // three-way swap
669 65 memcpy(tmp, left, elem_size);
670 65 memcpy(left, right, elem_size);
671 65 memcpy(right, tmp, elem_size);
672
673 // free dynamic memory, if it was needed
674
2/2
✓ Branch 0 (9→10) taken 1 times.
✓ Branch 1 (9→11) taken 64 times.
65 if (tmp != sbo_mem) {
675 1 cxFreeDefault(tmp);
676 }
677 }
678
679 // HIGH LEVEL ARRAY LIST FUNCTIONS
680
681 typedef struct {
682 struct cx_list_s base;
683 void *data;
684 size_t capacity;
685 } cx_array_list;
686
687 199 static void cx_arl_destructor(struct cx_list_s *list) {
688 199 cx_array_list *arl = (cx_array_list *) list;
689
690 199 char *ptr = arl->data;
691
692
2/2
✓ Branch 0 (2→3) taken 5 times.
✓ Branch 1 (2→10) taken 194 times.
199 if (list->collection.simple_destructor) {
693
2/2
✓ Branch 0 (9→4) taken 25 times.
✓ Branch 1 (9→10) taken 5 times.
30 for (size_t i = 0; i < list->collection.size; i++) {
694
2/2
✓ Branch 0 (4→5) taken 13 times.
✓ Branch 1 (4→6) taken 12 times.
25 cx_invoke_simple_destructor(list, ptr);
695 25 ptr += list->collection.elem_size;
696 }
697 }
698
2/2
✓ Branch 0 (10→11) taken 15 times.
✓ Branch 1 (10→18) taken 184 times.
199 if (list->collection.advanced_destructor) {
699
2/2
✓ Branch 0 (17→12) taken 127 times.
✓ Branch 1 (17→18) taken 15 times.
142 for (size_t i = 0; i < list->collection.size; i++) {
700
1/2
✓ Branch 0 (12→13) taken 127 times.
✗ Branch 1 (12→14) not taken.
127 cx_invoke_advanced_destructor(list, ptr);
701 127 ptr += list->collection.elem_size;
702 }
703 }
704
705 199 cxFree(list->collection.allocator, arl->data);
706 199 cxFree(list->collection.allocator, list);
707 199 }
708
709 7148 static size_t cx_arl_insert_array(
710 struct cx_list_s *list,
711 size_t index,
712 const void *array,
713 size_t n
714 ) {
715 7148 cx_array_list *arl = (cx_array_list *) list;
716 7148 CxArray wrap = {
717 7148 arl->data, list->collection.size, arl->capacity
718 };
719
2/2
✓ Branch 0 (3→4) taken 4 times.
✓ Branch 1 (3→5) taken 7144 times.
7148 if (cx_array_insert_(list->collection.allocator, &wrap,
720 7148 list->collection.elem_size, index, array, n)) {
721 4 return 0;
722 }
723 7144 arl->data = wrap.data;
724 7144 arl->capacity = wrap.capacity;
725 7144 list->collection.size = wrap.size;
726 7144 return n;
727 }
728
729 47 static size_t cx_arl_insert_sorted_impl(
730 struct cx_list_s *list,
731 const void *sorted_data,
732 size_t n,
733 bool allow_duplicates
734 ) {
735 47 cx_array_list *arl = (cx_array_list *) list;
736 47 CxArray wrap = {
737 47 arl->data, list->collection.size, arl->capacity
738 };
739
740
1/2
✗ Branch 0 (3→4) not taken.
✓ Branch 1 (3→5) taken 47 times.
47 if (cx_array_insert_sorted_c_(
741 list->collection.allocator,
742 &wrap,
743 47 list->collection.elem_size,
744 sorted_data,
745 n,
746 cx_list_compare_wrapper,
747 list,
748 allow_duplicates
749 )) {
750 // array list implementation is "all or nothing"
751 return 0; // LCOV_EXCL_LINE
752 }
753 47 arl->data = wrap.data;
754 47 arl->capacity = wrap.capacity;
755 47 list->collection.size = wrap.size;
756 47 return n;
757 }
758
759 25 static size_t cx_arl_insert_sorted(
760 struct cx_list_s *list,
761 const void *sorted_data,
762 size_t n
763 ) {
764 25 return cx_arl_insert_sorted_impl(list, sorted_data, n, true);
765 }
766
767 22 static size_t cx_arl_insert_unique(
768 struct cx_list_s *list,
769 const void *sorted_data,
770 size_t n
771 ) {
772 22 return cx_arl_insert_sorted_impl(list, sorted_data, n, false);
773 }
774
775 7075 static void *cx_arl_insert_element(
776 struct cx_list_s *list,
777 size_t index,
778 const void *element
779 ) {
780
2/2
✓ Branch 0 (3→4) taken 7071 times.
✓ Branch 1 (3→5) taken 4 times.
7075 if (cx_arl_insert_array(list, index, element, 1) == 1) {
781 7071 return ((char*)((cx_array_list *) list)->data) + index * list->collection.elem_size;
782 } else {
783 4 return NULL;
784 }
785 }
786
787 10 static int cx_arl_insert_iter(
788 struct cx_iterator_s *iter,
789 const void *elem,
790 int prepend
791 ) {
792 10 struct cx_list_s *list = iter->src_handle;
793
2/2
✓ Branch 0 (2→3) taken 6 times.
✓ Branch 1 (2→9) taken 4 times.
10 if (iter->index < list->collection.size) {
794
1/2
✗ Branch 0 (4→5) not taken.
✓ Branch 1 (4→6) taken 6 times.
6 if (cx_arl_insert_element(list,
795 6 iter->index + 1 - prepend, elem) == NULL) {
796 return 1; // LCOV_EXCL_LINE
797 }
798 6 iter->elem_count++;
799
2/2
✓ Branch 0 (6→7) taken 4 times.
✓ Branch 1 (6→8) taken 2 times.
6 if (prepend != 0) {
800 4 iter->index++;
801 4 iter->elem_handle = ((char *) iter->elem_handle) + list->collection.elem_size;
802 }
803 6 return 0;
804 } else {
805
1/2
✗ Branch 0 (10→11) not taken.
✓ Branch 1 (10→12) taken 4 times.
4 if (cx_arl_insert_element(list, list->collection.size, elem) == NULL) {
806 return 1; // LCOV_EXCL_LINE
807 }
808 4 iter->elem_count++;
809 4 iter->index = list->collection.size;
810 4 return 0;
811 }
812 }
813
814 120 static size_t cx_arl_remove(
815 struct cx_list_s *list,
816 size_t index,
817 size_t num,
818 void *targetbuf
819 ) {
820 120 cx_array_list *arl = (cx_array_list *) list;
821
822 // out-of-bounds check
823 size_t remove;
824
2/2
✓ Branch 0 (2→3) taken 12 times.
✓ Branch 1 (2→4) taken 108 times.
120 if (index >= list->collection.size) {
825 12 remove = 0;
826
2/2
✓ Branch 0 (4→5) taken 2 times.
✓ Branch 1 (4→6) taken 106 times.
108 } else if (index + num > list->collection.size) {
827 2 remove = list->collection.size - index;
828 } else {
829 106 remove = num;
830 }
831
832 // easy exit
833
2/2
✓ Branch 0 (7→8) taken 12 times.
✓ Branch 1 (7→9) taken 108 times.
120 if (remove == 0) return 0;
834
835 // destroy or copy contents
836
2/2
✓ Branch 0 (9→10) taken 92 times.
✓ Branch 1 (9→23) taken 16 times.
108 if (targetbuf == NULL) {
837
2/2
✓ Branch 0 (22→11) taken 130 times.
✓ Branch 1 (22→24) taken 92 times.
222 for (size_t idx = index; idx < index + remove; idx++) {
838
8/8
✓ Branch 0 (11→12) taken 24 times.
✓ Branch 1 (11→16) taken 106 times.
✓ Branch 2 (12→13) taken 12 times.
✓ Branch 3 (12→14) taken 12 times.
✓ Branch 4 (16→17) taken 8 times.
✓ Branch 5 (16→21) taken 122 times.
✓ Branch 6 (17→18) taken 4 times.
✓ Branch 7 (17→19) taken 4 times.
130 cx_invoke_destructor(
839 list,
840 ((char *) arl->data) + idx * list->collection.elem_size
841 );
842 }
843 } else {
844 16 memcpy(
845 targetbuf,
846 16 ((char *) arl->data) + index * list->collection.elem_size,
847 16 remove * list->collection.elem_size
848 );
849 }
850
851 // calculate how many elements would need to be moved
852 108 size_t remaining = list->collection.size - index - remove;
853
854 // short-circuit removal of last elements
855
2/2
✓ Branch 0 (24→25) taken 20 times.
✓ Branch 1 (24→26) taken 88 times.
108 if (remaining == 0) {
856 20 list->collection.size -= remove;
857 20 return remove;
858 }
859
860 // just move the elements to the left
861 88 char *dst_move = arl->data;
862 88 dst_move += index * list->collection.elem_size;
863 88 char *first_remaining = dst_move + remove * list->collection.elem_size;
864 88 memmove(dst_move, first_remaining, remaining * list->collection.elem_size);
865
866 // decrease the size
867 88 list->collection.size -= remove;
868
869 88 return remove;
870 }
871
872 10 static void cx_arl_clear(struct cx_list_s *list) {
873
1/2
✗ Branch 0 (2→3) not taken.
✓ Branch 1 (2→4) taken 10 times.
10 if (list->collection.size == 0) return;
874
875 10 cx_array_list *arl = (cx_array_list *) list;
876 10 char *ptr = arl->data;
877
878
2/2
✓ Branch 0 (4→5) taken 2 times.
✓ Branch 1 (4→12) taken 8 times.
10 if (list->collection.simple_destructor) {
879
2/2
✓ Branch 0 (11→6) taken 112 times.
✓ Branch 1 (11→12) taken 2 times.
114 for (size_t i = 0; i < list->collection.size; i++) {
880
2/2
✓ Branch 0 (6→7) taken 56 times.
✓ Branch 1 (6→8) taken 56 times.
112 cx_invoke_simple_destructor(list, ptr);
881 112 ptr += list->collection.elem_size;
882 }
883 }
884
2/2
✓ Branch 0 (12→13) taken 2 times.
✓ Branch 1 (12→20) taken 8 times.
10 if (list->collection.advanced_destructor) {
885
2/2
✓ Branch 0 (19→14) taken 142 times.
✓ Branch 1 (19→20) taken 2 times.
144 for (size_t i = 0; i < list->collection.size; i++) {
886
2/2
✓ Branch 0 (14→15) taken 71 times.
✓ Branch 1 (14→16) taken 71 times.
142 cx_invoke_advanced_destructor(list, ptr);
887 142 ptr += list->collection.elem_size;
888 }
889 }
890
891 10 memset(arl->data, 0, list->collection.size * list->collection.elem_size);
892 10 list->collection.size = 0;
893 }
894
895 23 static int cx_arl_swap(
896 struct cx_list_s *list,
897 size_t i,
898 size_t j
899 ) {
900
4/4
✓ Branch 0 (2→3) taken 19 times.
✓ Branch 1 (2→4) taken 4 times.
✓ Branch 2 (3→4) taken 2 times.
✓ Branch 3 (3→5) taken 17 times.
23 if (i >= list->collection.size || j >= list->collection.size) return 1;
901 17 cx_array_list *arl = (cx_array_list *) list;
902 17 cx_array_swap(arl->data, list->collection.elem_size, i, j);
903 17 return 0;
904 }
905
906 9580 static void *cx_arl_at(
907 const struct cx_list_s *list,
908 size_t index
909 ) {
910
2/2
✓ Branch 0 (2→3) taken 9572 times.
✓ Branch 1 (2→4) taken 8 times.
9580 if (index < list->collection.size) {
911 9572 const cx_array_list *arl = (const cx_array_list *) list;
912 9572 char *space = arl->data;
913 9572 return space + index * list->collection.elem_size;
914 } else {
915 8 return NULL;
916 }
917 }
918
919 177 static size_t cx_arl_find_remove(
920 struct cx_list_s *list,
921 const void *elem,
922 bool remove
923 ) {
924 assert(list != NULL);
925
1/2
✗ Branch 0 (2→3) not taken.
✓ Branch 1 (2→4) taken 177 times.
177 if (list->collection.size == 0) return 0;
926 177 char *cur = ((const cx_array_list *) list)->data;
927
928 // optimize with binary search, when sorted
929
2/2
✓ Branch 0 (4→5) taken 6 times.
✓ Branch 1 (4→10) taken 171 times.
177 if (list->collection.sorted) {
930 6 size_t i = cx_array_binary_search_c(
931 cur,
932 6 list->collection.size,
933 6 list->collection.elem_size,
934 elem,
935 cx_list_compare_wrapper,
936 list
937 );
938
4/4
✓ Branch 0 (6→7) taken 4 times.
✓ Branch 1 (6→9) taken 2 times.
✓ Branch 2 (7→8) taken 2 times.
✓ Branch 3 (7→9) taken 2 times.
6 if (remove && i < list->collection.size) {
939 2 cx_arl_remove(list, i, 1, NULL);
940 }
941 6 return i;
942 }
943
944 // fallback: linear search
945
2/2
✓ Branch 0 (17→11) taken 17672 times.
✓ Branch 1 (17→18) taken 83 times.
17755 for (size_t i = 0; i < list->collection.size; i++) {
946
2/2
✓ Branch 0 (12→13) taken 88 times.
✓ Branch 1 (12→16) taken 17584 times.
17672 if (0 == cx_list_compare_wrapper(elem, cur, list)) {
947
2/2
✓ Branch 0 (13→14) taken 4 times.
✓ Branch 1 (13→15) taken 84 times.
88 if (remove) {
948 4 cx_arl_remove(list, i, 1, NULL);
949 }
950 88 return i;
951 }
952 17584 cur += list->collection.elem_size;
953 }
954 83 return list->collection.size;
955 }
956
957 4 static void cx_arl_sort(struct cx_list_s *list) {
958 4 cx_array_qsort_c(((cx_array_list *) list)->data,
959 4 list->collection.size,
960 4 list->collection.elem_size,
961 cx_list_compare_wrapper,
962 list
963 );
964 4 }
965
966 32 static int cx_arl_compare(
967 const struct cx_list_s *list,
968 const struct cx_list_s *other
969 ) {
970
2/2
✓ Branch 0 (2→3) taken 24 times.
✓ Branch 1 (2→10) taken 8 times.
32 if (list->collection.size == other->collection.size) {
971 24 const char *left = ((const cx_array_list *) list)->data;
972 24 const char *right = ((const cx_array_list *) other)->data;
973
2/2
✓ Branch 0 (8→4) taken 723 times.
✓ Branch 1 (8→9) taken 16 times.
739 for (size_t i = 0; i < list->collection.size; i++) {
974 723 int d = cx_list_compare_wrapper(left, right, (void*)list);
975
2/2
✓ Branch 0 (5→6) taken 8 times.
✓ Branch 1 (5→7) taken 715 times.
723 if (d != 0) {
976 8 return d;
977 }
978 715 left += list->collection.elem_size;
979 715 right += other->collection.elem_size;
980 }
981 16 return 0;
982 } else {
983
2/2
✓ Branch 0 (10→11) taken 4 times.
✓ Branch 1 (10→12) taken 4 times.
8 return list->collection.size < other->collection.size ? -1 : 1;
984 }
985 }
986
987 2 static void cx_arl_reverse(struct cx_list_s *list) {
988
1/2
✗ Branch 0 (2→3) not taken.
✓ Branch 1 (2→4) taken 2 times.
2 if (list->collection.size < 2) return;
989 2 void *data = ((const cx_array_list *) list)->data;
990 2 size_t half = list->collection.size / 2;
991
2/2
✓ Branch 0 (7→5) taken 50 times.
✓ Branch 1 (7→8) taken 2 times.
52 for (size_t i = 0; i < half; i++) {
992 50 cx_array_swap(data, list->collection.elem_size, i, list->collection.size - 1 - i);
993 }
994 }
995
996 525 static bool cx_arl_iter_valid(const void *it) {
997 525 const struct cx_iterator_s *iter = it;
998 525 const struct cx_list_s *list = iter->src_handle;
999 525 return iter->index < list->collection.size;
1000 }
1001
1002 8593 static void *cx_arl_iter_current(const void *it) {
1003 8593 const struct cx_iterator_s *iter = it;
1004 8593 return iter->elem_handle;
1005 }
1006
1007 8246 static void cx_arl_iter_next(void *it) {
1008 8246 struct cx_iterator_s *iter = it;
1009
2/2
✓ Branch 0 (2→3) taken 30 times.
✓ Branch 1 (2→5) taken 8216 times.
8246 if (iter->base.remove) {
1010 30 iter->base.remove = false;
1011 30 cx_arl_remove(iter->src_handle, iter->index, 1, NULL);
1012 30 iter->elem_count--;
1013 } else {
1014 8216 iter->index++;
1015 8216 iter->elem_handle =
1016 8216 ((char *) iter->elem_handle)
1017 8216 + ((const struct cx_list_s *) iter->src_handle)->collection.elem_size;
1018 }
1019 8246 }
1020
1021 222 static void cx_arl_iter_prev(void *it) {
1022 222 struct cx_iterator_s *iter = it;
1023
2/2
✓ Branch 0 (2→3) taken 28 times.
✓ Branch 1 (2→5) taken 194 times.
222 if (iter->base.remove) {
1024 28 iter->base.remove = false;
1025 28 cx_arl_remove(iter->src_handle, iter->index, 1, NULL);
1026 28 iter->elem_count--;
1027 }
1028 222 iter->index--;
1029 222 cx_array_list *list = iter->src_handle;
1030
2/2
✓ Branch 0 (5→6) taken 214 times.
✓ Branch 1 (5→7) taken 8 times.
222 if (iter->index < list->base.collection.size) {
1031 214 iter->elem_handle = ((char *) list->data)
1032 214 + iter->index * list->base.collection.elem_size;
1033 }
1034 222 }
1035
1036 4 static int cx_arl_change_capacity(
1037 struct cx_list_s *list,
1038 size_t new_capacity
1039 ) {
1040 4 cx_array_list *arl = (cx_array_list *)list;
1041 4 return cxReallocateArray(list->collection.allocator,
1042 &arl->data, new_capacity, list->collection.elem_size);
1043 }
1044
1045 322 static struct cx_iterator_s cx_arl_iterator(
1046 const struct cx_list_s *list,
1047 size_t index,
1048 bool backwards
1049 ) {
1050 struct cx_iterator_s iter;
1051
1052 322 iter.index = index;
1053 322 iter.src_handle = (void*)list;
1054 322 iter.elem_handle = cx_arl_at(list, index);
1055 322 iter.elem_size = list->collection.elem_size;
1056 322 iter.elem_count = list->collection.size;
1057 322 iter.base.valid = cx_arl_iter_valid;
1058 322 iter.base.current = cx_arl_iter_current;
1059
2/2
✓ Branch 0 (3→4) taken 12 times.
✓ Branch 1 (3→5) taken 310 times.
322 iter.base.next = backwards ? cx_arl_iter_prev : cx_arl_iter_next;
1060 322 iter.base.remove = false;
1061 322 iter.base.allow_remove = true;
1062
1063 322 return iter;
1064 }
1065
/**
 * Dispatch table binding the array list implementation to the
 * generic CxList interface. The member order must match the
 * declaration order of cx_list_class.
 */
static cx_list_class cx_array_list_class = {
        cx_arl_destructor,
        cx_arl_insert_element,
        cx_arl_insert_array,
        cx_arl_insert_sorted,
        cx_arl_insert_unique,
        cx_arl_insert_iter,
        cx_arl_remove,
        cx_arl_clear,
        cx_arl_swap,
        cx_arl_at,
        cx_arl_find_remove,
        cx_arl_sort,
        cx_arl_compare,
        cx_arl_reverse,
        cx_arl_change_capacity,
        cx_arl_iterator,
};
1084
1085 199 CxList *cxArrayListCreate(
1086 const CxAllocator *allocator,
1087 size_t elem_size,
1088 size_t initial_capacity
1089 ) {
1090
2/2
✓ Branch 0 (2→3) taken 18 times.
✓ Branch 1 (2→4) taken 181 times.
199 if (allocator == NULL) {
1091 18 allocator = cxDefaultAllocator;
1092 }
1093
1094 199 cx_array_list *list = cxCalloc(allocator, 1, sizeof(cx_array_list));
1095
1/2
✗ Branch 0 (5→6) not taken.
✓ Branch 1 (5→7) taken 199 times.
199 if (list == NULL) return NULL;
1096 199 cx_list_init((CxList*)list, &cx_array_list_class, allocator, elem_size);
1097 199 list->capacity = initial_capacity;
1098
1099 // allocate the array after the real elem_size is known
1100 199 list->data = cxCalloc(allocator, initial_capacity,
1101 list->base.collection.elem_size);
1102 if (list->data == NULL) { // LCOV_EXCL_START
1103 cxFree(allocator, list);
1104 return NULL;
1105 } // LCOV_EXCL_STOP
1106
1107 199 return (CxList *) list;
1108 }
1109