Commit 5a4bcb2

Define ROUNDUP_PAGESIZE, ROUNDUP_GRANULE_SIZE macros (code refactoring)
* alloc.c (GC_expand_hp_inner): Use ROUNDUP_PAGESIZE().
* checksums.c (GC_record_fault, GC_was_faulted): Likewise.
* os_dep.c (GC_unix_mmap_get_mem, GC_wince_get_mem, GC_unmap_start, GC_remove_protection): Likewise.
* headers.c (GC_scratch_alloc): Use ROUNDUP_GRANULE_SIZE().
* malloc.c (GC_alloc_large): Likewise.
* mallocx.c (GC_malloc_many): Likewise.
* headers.c (GC_scratch_alloc): Use ROUNDUP_PAGESIZE() (only if USE_MMAP).
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDUP_PAGESIZE): Define macro to round up a value to a multiple of a granule or a page, respectively.
1 parent ff6c3d9 commit 5a4bcb2
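For reference, a minimal standalone sketch of the round-up idiom that the two new macros encapsulate. The bit-mask trick only works when the alignment is a power of two, an assumption the collector already relied on before this refactoring; the GRANULE_BYTES value below is an illustrative placeholder, not the value the collector configures at build time.

/* Sketch only: demonstrates the (x + align-1) & ~(align-1) round-up idiom. */
#include <assert.h>
#include <stddef.h>

#define GRANULE_BYTES 16    /* placeholder; assumed to be a power of two */

/* Mirrors the shape of the new ROUNDUP_GRANULE_SIZE() macro. */
#define ROUNDUP_GRANULE_SIZE(bytes) \
                (((bytes) + (GRANULE_BYTES - 1)) & ~(GRANULE_BYTES - 1))

int main(void)
{
  /* Any request in (0, 16] rounds to 16, in (16, 32] rounds to 32, etc. */
  assert(ROUNDUP_GRANULE_SIZE((size_t)1) == 16);
  assert(ROUNDUP_GRANULE_SIZE((size_t)16) == 16);
  assert(ROUNDUP_GRANULE_SIZE((size_t)17) == 32);
  return 0;
}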

7 files changed (+23 −34 lines)

alloc.c

Lines changed: 1 addition & 8 deletions
@@ -1169,14 +1169,7 @@ GC_INNER GC_bool GC_expand_hp_inner(word n)
                         /* heap to expand soon. */
 
     if (n < MINHINCR) n = MINHINCR;
-    bytes = n * HBLKSIZE;
-    /* Make sure bytes is a multiple of GC_page_size */
-      {
-        word mask = GC_page_size - 1;
-        bytes += mask;
-        bytes &= ~mask;
-      }
-
+    bytes = ROUNDUP_PAGESIZE(n * HBLKSIZE);
     if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
         /* Exceeded self-imposed limit */
         return(FALSE);

checksums.c

Lines changed: 2 additions & 6 deletions
@@ -41,21 +41,17 @@ STATIC size_t GC_n_faulted = 0;
 
 void GC_record_fault(struct hblk * h)
 {
-    word page = (word)h;
+    word page = ROUNDUP_PAGESIZE((word)h);
 
-    page += GC_page_size - 1;
-    page &= ~(GC_page_size - 1);
     if (GC_n_faulted >= NSUMS) ABORT("write fault log overflowed");
     GC_faulted[GC_n_faulted++] = page;
 }
 
 STATIC GC_bool GC_was_faulted(struct hblk *h)
 {
     size_t i;
-    word page = (word)h;
+    word page = ROUNDUP_PAGESIZE((word)h);
 
-    page += GC_page_size - 1;
-    page &= ~(GC_page_size - 1);
     for (i = 0; i < GC_n_faulted; ++i) {
         if (GC_faulted[i] == page) return TRUE;
     }

headers.c

Lines changed: 3 additions & 6 deletions
@@ -119,8 +119,7 @@ GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
 {
     register ptr_t result = scratch_free_ptr;
 
-    bytes += GRANULE_BYTES-1;
-    bytes &= ~(GRANULE_BYTES-1);
+    bytes = ROUNDUP_GRANULE_SIZE(bytes);
     scratch_free_ptr += bytes;
     if ((word)scratch_free_ptr <= (word)GC_scratch_end_ptr) {
         return(result);
@@ -132,8 +131,7 @@ GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
           /* Undo the damage, and get memory directly */
             bytes_to_get = bytes;
#           ifdef USE_MMAP
-               bytes_to_get += GC_page_size - 1;
-               bytes_to_get &= ~(GC_page_size - 1);
+               bytes_to_get = ROUNDUP_PAGESIZE(bytes_to_get);
#           endif
            result = (ptr_t)GET_MEM(bytes_to_get);
            GC_add_to_our_memory(result, bytes_to_get);
@@ -148,8 +146,7 @@ GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
            scratch_free_ptr -= bytes;
            bytes_to_get = bytes;
#           ifdef USE_MMAP
-               bytes_to_get += GC_page_size - 1;
-               bytes_to_get &= ~(GC_page_size - 1);
+               bytes_to_get = ROUNDUP_PAGESIZE(bytes_to_get);
#           endif
            result = (ptr_t)GET_MEM(bytes_to_get);
            GC_add_to_our_memory(result, bytes_to_get);

include/private/gc_priv.h

Lines changed: 9 additions & 1 deletion
@@ -763,6 +763,10 @@ GC_EXTERN GC_warn_proc GC_current_warn_proc;
 
 # define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
 
+/* Round up allocation size (in bytes) to a multiple of a granule. */
+#define ROUNDUP_GRANULE_SIZE(bytes) \
+                (((bytes) + (GRANULE_BYTES - 1)) & ~(GRANULE_BYTES - 1))
+
 /* Round up byte allocation requests to integral number of words, etc. */
 # define ROUNDED_UP_GRANULES(n) \
         BYTES_TO_GRANULES((n) + (GRANULE_BYTES - 1 + EXTRA_BYTES))
@@ -1398,13 +1402,17 @@ GC_EXTERN word GC_n_heap_sects; /* Number of separately added heap */
 
 GC_EXTERN word GC_page_size;
 
+/* Round up allocation size to a multiple of a page size.      */
+/* GC_setpagesize() is assumed to be already invoked.          */
+#define ROUNDUP_PAGESIZE(bytes) \
+                (((bytes) + GC_page_size - 1) & ~(GC_page_size - 1))
+
 #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
   struct _SYSTEM_INFO;
   GC_EXTERN struct _SYSTEM_INFO GC_sysinfo;
   GC_INNER GC_bool GC_is_heap_base(ptr_t p);
 #endif
 
-
 GC_EXTERN word GC_black_list_spacing;
                         /* Average number of bytes between blacklisted */
                         /* blocks. Approximate. */
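Unlike ROUNDUP_GRANULE_SIZE, whose alignment is a compile-time constant, ROUNDUP_PAGESIZE rounds to GC_page_size, which is only known at run time; hence the new comment that GC_setpagesize() must already have been invoked. Below is a hedged, self-contained sketch of that contract; the page_size variable and the sysconf() call are stand-ins for illustration, not the collector's actual initialization path, and the mask trick again assumes the page size is a power of two.

/* Sketch only: round-up to a run-time page size, initialized before use. */
#include <stdio.h>
#include <unistd.h>

typedef unsigned long word;   /* stand-in for the collector's word type */

static word page_size;        /* stand-in for GC_page_size */

#define ROUNDUP_PAGESIZE(bytes) \
                (((bytes) + page_size - 1) & ~(page_size - 1))

int main(void)
{
  page_size = (word)sysconf(_SC_PAGESIZE);   /* must happen before any use */
  printf("1 byte rounds to %lu\n", ROUNDUP_PAGESIZE((word)1));
  printf("page_size + 1 rounds to %lu\n", ROUNDUP_PAGESIZE(page_size + 1));
  return 0;
}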

malloc.c

Lines changed: 1 addition & 2 deletions
@@ -46,8 +46,7 @@ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
     ptr_t result;
     GC_bool retry = FALSE;
 
-    /* Round up to a multiple of a granule. */
-    lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
+    lb = ROUNDUP_GRANULE_SIZE(lb);
     n_blocks = OBJ_SZ_TO_BLOCKS(lb);
     if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
     /* Do our share of marking work */

mallocx.c

Lines changed: 2 additions & 2 deletions
@@ -445,8 +445,8 @@ GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
 GC_API void * GC_CALL GC_malloc_many(size_t lb)
 {
     void *result;
-    GC_generic_malloc_many((lb + EXTRA_BYTES + GRANULE_BYTES-1)
-                           & ~(GRANULE_BYTES-1),
+
+    GC_generic_malloc_many(ROUNDUP_GRANULE_SIZE(lb + EXTRA_BYTES),
                            NORMAL, &result);
     return result;
 }

os_dep.c

Lines changed: 5 additions & 9 deletions
@@ -2069,8 +2069,7 @@ STATIC ptr_t GC_unix_mmap_get_mem(word bytes)
#   undef IGNORE_PAGES_EXECUTABLE
 
    if (result == MAP_FAILED) return(0);
-   last_addr = (ptr_t)result + bytes + GC_page_size - 1;
-   last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
+   last_addr = (ptr_t)ROUNDUP_PAGESIZE((word)result + bytes);
#   if !defined(LINUX)
      if (last_addr == 0) {
        /* Oops. We got the end of the address space. This isn't */
@@ -2190,8 +2189,7 @@ void * os2_alloc(size_t bytes)
    ptr_t result = 0; /* initialized to prevent warning. */
    word i;
 
-   /* Round up allocation size to multiple of page size */
-   bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
+   bytes = ROUNDUP_PAGESIZE(bytes);
 
    /* Try to find reserved, uncommitted pages */
    for (i = 0; i < GC_n_heap_bases; i++) {
@@ -2366,9 +2364,8 @@ void * os2_alloc(size_t bytes)
 /* Return 0 if the block is too small to make this feasible. */
 STATIC ptr_t GC_unmap_start(ptr_t start, size_t bytes)
 {
-    ptr_t result;
-    /* Round start to next page boundary. */
-    result = (ptr_t)((word)(start + GC_page_size - 1) & ~(GC_page_size - 1));
+    ptr_t result = (ptr_t)ROUNDUP_PAGESIZE((word)start);
+
     if ((word)(result + GC_page_size) > (word)(start + bytes)) return 0;
     return result;
 }
@@ -3269,8 +3266,7 @@ GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
#   endif
    if (!GC_dirty_maintained) return;
    h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
-   h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
-                           & ~(GC_page_size-1));
+   h_end = (struct hblk *)ROUNDUP_PAGESIZE((word)(h + nblocks));
    if (h_end == h_trunc + 1 &&
        get_pht_entry_from_index(GC_dirty_pages, PHT_HASH(h_trunc))) {
      /* already marked dirty, and hence unprotected. */
