// Simplified version of mempool.c, that is more oriented towards
// checking that the description of invalid addresses is correct.

#include <stdio.h>
#include <unistd.h>
#include "tests/sys_mman.h"
#include <assert.h>
#include <stdlib.h>

#include "../memcheck.h"

#define SUPERBLOCK_SIZE 100000
#define REDZONE_SIZE 8
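// Every allocation handed out by allocate() is carved from a single
// SUPERBLOCK_SIZE-byte superblock and is surrounded on both sides by
// REDZONE_SIZE bytes of red zone.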

typedef struct _level_list
{
   struct _level_list *next;
   char *where;
   // Padding ensures the struct is the same size on 32-bit and 64-bit
   // machines.
   char padding[16 - 2*sizeof(char*)];
} level_list;

typedef struct _pool {
   char *mem;
   char *where;
   level_list *levels;
   int size, left;
   // Padding ensures the struct is the same size on 32-bit and 64-bit
   // machines.
   char padding[24 - 3*sizeof(char*)];
} pool;

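// make_pool() allocates the pool header and its superblock, either with
// mmap or with malloc, and marks the whole superblock NOACCESS so that
// any touch outside a live mempool allocation is reported by Memcheck.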
pool *make_pool(int use_mmap)
{
   pool *p;

   if (use_mmap) {
      p = (pool *)mmap(0, sizeof(pool), PROT_READ|PROT_WRITE|PROT_EXEC,
                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      p->where = p->mem = (char *)mmap(NULL, SUPERBLOCK_SIZE,
                                       PROT_READ|PROT_WRITE|PROT_EXEC,
                                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   } else {
      p = (pool *)malloc(sizeof(pool));
      p->where = p->mem = (char *)malloc(SUPERBLOCK_SIZE);
   }

   p->size = p->left = SUPERBLOCK_SIZE;
   p->levels = NULL;
   (void) VALGRIND_MAKE_MEM_NOACCESS(p->where, SUPERBLOCK_SIZE);
   return p;
}

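// push() starts a new level: it registers a mempool anchored at the
// current high-water mark (p->where), so later MEMPOOL_ALLOC/FREE client
// requests for this level use that address as the pool handle.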
void push(pool *p, int use_mmap)
{
   level_list *l;

   if (use_mmap)
      l = (level_list *)mmap(0, sizeof(level_list),
                             PROT_READ|PROT_WRITE|PROT_EXEC,
                             MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   else
      l = (level_list *)malloc(sizeof(level_list));

   l->next = p->levels;
   l->where = p->where;
   VALGRIND_CREATE_MEMPOOL(l->where, REDZONE_SIZE, 0);
   p->levels = l;
}

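// pop() tears down the most recent level: its mempool is destroyed, the
// memory the level covered is re-protected as NOACCESS, and p->where is
// rolled back to where the level started.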
void pop(pool *p, int use_mmap)
{
   level_list *l = p->levels;
   p->levels = l->next;
   VALGRIND_DESTROY_MEMPOOL(l->where);
   (void) VALGRIND_MAKE_MEM_NOACCESS(l->where, p->where-l->where);
   p->where = l->where;
   if (use_mmap)
      munmap(l, sizeof(level_list));
   else
      free(l);
}

void destroy_pool(pool *p, int use_mmap)
{
   level_list *l = p->levels;

   while (l) {
      pop(p, use_mmap);
      l = p->levels;   // pop() advances p->levels; re-read it so the loop terminates
   }
   if (use_mmap) {
      munmap(p->mem, SUPERBLOCK_SIZE);
      munmap(p, sizeof(pool));
   } else {
      free(p->mem);
      free(p);
   }
}

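// allocate() carves "size" bytes out of the current level, leaving
// REDZONE_SIZE protected bytes on each side, and tells Memcheck about the
// new chunk with MEMPOOL_ALLOC so the payload becomes addressable.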
char *allocate(pool *p, int size)
{
   char *where;
   p->left -= size + (REDZONE_SIZE*2);
   where = p->where + REDZONE_SIZE;
   p->where += size + (REDZONE_SIZE*2);
   VALGRIND_MEMPOOL_ALLOC(p->levels->where, where, size);
   return where;
}

//-------------------------------------------------------------------------
// Rest
//-------------------------------------------------------------------------

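// test() triggers a series of deliberately invalid accesses so that the
// addresses Memcheck describes in its error messages can be checked, for
// both a malloc-backed and an mmap-backed pool.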
void test(void)
{
   char *x1, *x2;
   char res = 0;

   // p1 is a malloc-backed pool
   pool *p1 = make_pool(0);

   // p2 is a mmap-backed pool
   pool *p2 = make_pool(1);

   push(p1, 0);
   push(p2, 1);

   x1 = allocate(p1, 10);
   x2 = allocate(p2, 20);

   fprintf(stderr,
           "\n------ out of range reads in malloc-backed pool ------\n\n");
   res += x1[-1]; // invalid
   res += x1[10]; // invalid

   fprintf(stderr,
           "\n------ out of range reads in mmap-backed pool ------\n\n");
   res += x2[-1]; // invalid
   res += x2[20]; // invalid

   fprintf(stderr,
           "\n------ read free in malloc-backed pool ------\n\n");
   // The mempool handle is the superblock base registered in push().
   VALGRIND_MEMPOOL_FREE(p1->mem, x1);
   res += x1[5];

   fprintf(stderr,
           "\n------ read free in mmap-backed pool ------\n\n");
   VALGRIND_MEMPOOL_FREE(p2->mem, x2);
   res += x2[11];

   fprintf(stderr,
           "\n------ double free in malloc-backed pool ------\n\n");
   VALGRIND_MEMPOOL_FREE(p1->mem, x1);

   fprintf(stderr,
           "\n------ double free in mmap-backed pool ------\n\n");
   VALGRIND_MEMPOOL_FREE(p2->mem, x2);

   {
      // Test that the redzones are still protected even if the user
      // forgets to mark the superblock NOACCESS.
      char superblock[100];

      VALGRIND_CREATE_MEMPOOL(superblock, REDZONE_SIZE, 0);
      // The user should mark the superblock NOACCESS to benefit from
      // full Memcheck protection.  VALGRIND_MEMPOOL_ALLOC will however
      // still ensure the redzones are protected.
      VALGRIND_MEMPOOL_ALLOC(superblock, superblock+30, 10);

      res += superblock[30]; // valid
      res += superblock[39]; // valid

      fprintf(stderr,
              "\n------ 2 invalid accesses in 'no no-access superblock' ---\n\n");
      res += superblock[29]; // invalid
      res += superblock[40]; // invalid

      VALGRIND_DESTROY_MEMPOOL(superblock);
   }
   // claim res is used, so gcc can't nuke this all
   __asm__ __volatile__("" : : "r"(res));

   fprintf(stderr,
           "\n------ done ------\n\n");
   pop(p1, 0);
   pop(p2, 1);
   destroy_pool(p1, 0);
   destroy_pool(p2, 1);
}

int main(void)
{
   test();
   return 0;
}