/**************************************************************************
 *
 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Debug buffer manager to detect buffer under- and overflows.
 *
 * \author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */
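
/*
 * How it works: every buffer handed out by this manager is allocated from
 * the underlying provider with extra "slop" areas placed before and after
 * the size the caller asked for.  Both slop areas are filled with a known
 * byte pattern at creation time, and the pattern is re-checked whenever the
 * buffer is mapped, unmapped, validated or destroyed.  A corrupted pattern
 * means the caller wrote outside the bounds it requested, and the backtrace
 * captured at buffer creation is dumped to help locate the offending code.
 */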


#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"
#include "util/u_time.h"
#include "util/u_debug_stack.h"

#include "pb_buffer.h"
#include "pb_bufmgr.h"


#ifdef DEBUG


#define PB_DEBUG_CREATE_BACKTRACE 8
#define PB_DEBUG_MAP_BACKTRACE 8


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)


struct pb_debug_manager;


/**
 * Wrapper around a pipe buffer which adds under- and overflow guard areas
 * and checks them for corruption.
 */
struct pb_debug_buffer
{
   struct pb_buffer base;

   struct pb_buffer *buffer;
   struct pb_debug_manager *mgr;

   pb_size underflow_size;
   pb_size overflow_size;

   struct debug_stack_frame create_backtrace[PB_DEBUG_CREATE_BACKTRACE];

   pipe_mutex mutex;
   unsigned map_count;
   struct debug_stack_frame map_backtrace[PB_DEBUG_MAP_BACKTRACE];

   struct list_head head;
};


struct pb_debug_manager
{
   struct pb_manager base;

   struct pb_manager *provider;

   pb_size underflow_size;
   pb_size overflow_size;

   pipe_mutex mutex;
   struct list_head list;
};


static INLINE struct pb_debug_buffer *
pb_debug_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_debug_buffer *)buf;
}


static INLINE struct pb_debug_manager *
pb_debug_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_debug_manager *)mgr;
}


static const uint8_t random_pattern[32] = {
   0xaf, 0xcf, 0xa5, 0xa2, 0xc2, 0x63, 0x15, 0x1a,
   0x7e, 0xe2, 0x7e, 0x84, 0x15, 0x49, 0xa2, 0x1e,
   0x49, 0x63, 0xf5, 0x52, 0x74, 0x66, 0x9e, 0xc4,
   0x6d, 0xcf, 0x2c, 0x4a, 0x74, 0xe6, 0xfd, 0x94
};


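/**
 * Fill a memory region with the known byte pattern, repeating the pattern
 * as many times as needed to cover the whole region.
 */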
static INLINE void
fill_random_pattern(uint8_t *dst, pb_size size)
{
   pb_size i = 0;
   while(size--) {
      *dst++ = random_pattern[i++];
      i &= sizeof(random_pattern) - 1;
   }
}


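/**
 * Verify that a memory region still contains the byte pattern written by
 * fill_random_pattern().  Returns FALSE on corruption and reports the
 * lowest and highest corrupted offsets through min_ofs/max_ofs.
 */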
static INLINE boolean
check_random_pattern(const uint8_t *dst, pb_size size,
                     pb_size *min_ofs, pb_size *max_ofs)
{
   boolean result = TRUE;
   pb_size i;
   *min_ofs = size;
   *max_ofs = 0;
   for(i = 0; i < size; ++i) {
      if(*dst++ != random_pattern[i % sizeof(random_pattern)]) {
         *min_ofs = MIN2(*min_ofs, i);
         *max_ofs = MAX2(*max_ofs, i);
         result = FALSE;
      }
   }
   return result;
}


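/**
 * Seed the underflow and overflow guard areas surrounding the user-visible
 * portion of the buffer with the known byte pattern.
 */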
static void
pb_debug_buffer_fill(struct pb_debug_buffer *buf)
{
   uint8_t *map;

   map = pb_map(buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   assert(map);
   if(map) {
      fill_random_pattern(map, buf->underflow_size);
      fill_random_pattern(map + buf->underflow_size + buf->base.size,
                          buf->overflow_size);
      pb_unmap(buf->buffer);
   }
}


/**
 * Check for under/over flows.
 *
 * Should be called with the buffer unmapped.
 */
static void
pb_debug_buffer_check(struct pb_debug_buffer *buf)
{
   uint8_t *map;

   map = pb_map(buf->buffer,
                PB_USAGE_CPU_READ |
                PB_USAGE_UNSYNCHRONIZED, NULL);
   assert(map);
   if(map) {
      boolean underflow, overflow;
      pb_size min_ofs, max_ofs;

      underflow = !check_random_pattern(map, buf->underflow_size,
                                        &min_ofs, &max_ofs);
      if(underflow) {
         debug_printf("buffer underflow (offset -%u%s to -%u bytes) detected\n",
                      buf->underflow_size - min_ofs,
                      min_ofs == 0 ? "+" : "",
                      buf->underflow_size - max_ofs);
      }

      overflow = !check_random_pattern(map + buf->underflow_size + buf->base.size,
                                       buf->overflow_size,
                                       &min_ofs, &max_ofs);
      if(overflow) {
         debug_printf("buffer overflow (size %u plus offset %u to %u%s bytes) detected\n",
                      buf->base.size,
                      min_ofs,
                      max_ofs,
                      max_ofs == buf->overflow_size - 1 ? "+" : "");
      }

      if(underflow || overflow)
         debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      debug_assert(!underflow);
      debug_assert(!overflow);

      /* re-fill if not aborted */
      if(underflow)
         fill_random_pattern(map, buf->underflow_size);
      if(overflow)
         fill_random_pattern(map + buf->underflow_size + buf->base.size,
                             buf->overflow_size);

      pb_unmap(buf->buffer);
   }
}


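/**
 * Destroy the wrapper: perform a final guard-area check, unlink the buffer
 * from the manager's list and release the underlying buffer.
 */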
static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   struct pb_debug_manager *mgr = buf->mgr;

   assert(!pipe_is_referenced(&buf->base.reference));

   pb_debug_buffer_check(buf);

   pipe_mutex_lock(mgr->mutex);
   LIST_DEL(&buf->head);
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(buf->mutex);

   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}


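/**
 * Map the underlying buffer and return a pointer past the underflow guard
 * area, so the caller only ever sees the size it originally requested.
 * A backtrace of the map call is recorded for later diagnostics.
 */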
static void *
pb_debug_buffer_map(struct pb_buffer *_buf,
                    unsigned flags, void *flush_ctx)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   void *map;

   pb_debug_buffer_check(buf);

   map = pb_map(buf->buffer, flags, flush_ctx);
   if(!map)
      return NULL;

   pipe_mutex_lock(buf->mutex);
   ++buf->map_count;
   debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
   pipe_mutex_unlock(buf->mutex);

   return (uint8_t *)map + buf->underflow_size;
}


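/**
 * Unmap the underlying buffer, decrement the map count and re-check the
 * guard areas.
 */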
static void
pb_debug_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);

   pipe_mutex_lock(buf->mutex);
   assert(buf->map_count);
   if(buf->map_count)
      --buf->map_count;
   pipe_mutex_unlock(buf->mutex);

   pb_unmap(buf->buffer);

   pb_debug_buffer_check(buf);
}


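/**
 * Forward to the wrapped buffer, adjusting the offset so it points at the
 * user-visible data rather than at the underflow guard area.
 */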
static void
pb_debug_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   pb_get_base_buffer(buf->buffer, base_buf, offset);
   *offset += buf->underflow_size;
}


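/**
 * Warn if the buffer is still mapped (dumping the last map backtrace),
 * check the guard areas and forward validation to the wrapped buffer.
 */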
static enum pipe_error
pb_debug_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);

   pipe_mutex_lock(buf->mutex);
   if(buf->map_count) {
      debug_printf("%s: attempting to validate a mapped buffer\n", __FUNCTION__);
      debug_printf("last map backtrace is\n");
      debug_backtrace_dump(buf->map_backtrace, PB_DEBUG_MAP_BACKTRACE);
   }
   pipe_mutex_unlock(buf->mutex);

   pb_debug_buffer_check(buf);

   return pb_validate(buf->buffer, vl, flags);
}


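/**
 * Fencing is simply forwarded to the wrapped buffer.
 */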
static void
pb_debug_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   pb_fence(buf->buffer, fence);
}


const struct pb_vtbl
pb_debug_buffer_vtbl = {
   pb_debug_buffer_destroy,
   pb_debug_buffer_map,
   pb_debug_buffer_unmap,
   pb_debug_buffer_validate,
   pb_debug_buffer_fence,
   pb_debug_buffer_get_base_buffer
};


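/**
 * Dump the list of live buffers together with their creation backtraces.
 * The manager mutex must be held by the caller.
 */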
static void
pb_debug_manager_dump_locked(struct pb_debug_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_debug_buffer *buf;

   curr = mgr->list.next;
   next = curr->next;
   while(curr != &mgr->list) {
      buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);

      debug_printf("buffer = %p\n", (void *) buf);
      debug_printf("    .size = 0x%x\n", buf->base.size);
      debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);

      curr = next;
      next = curr->next;
   }
}


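/**
 * Create a wrapped buffer: allocate underflow + size + overflow bytes from
 * the provider, seed the guard areas with the byte pattern, capture a
 * creation backtrace and add the buffer to the manager's list.
 */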
static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   struct pb_debug_buffer *buf;
   struct pb_desc real_desc;
   pb_size real_size;

   assert(size);
   assert(desc->alignment);

   buf = CALLOC_STRUCT(pb_debug_buffer);
   if(!buf)
      return NULL;

   real_size = mgr->underflow_size + size + mgr->overflow_size;
   real_desc = *desc;
   real_desc.usage |= PB_USAGE_CPU_WRITE;
   real_desc.usage |= PB_USAGE_CPU_READ;

   buf->buffer = mgr->provider->create_buffer(mgr->provider,
                                              real_size,
                                              &real_desc);
   if(!buf->buffer) {
      FREE(buf);
#if 0
      pipe_mutex_lock(mgr->mutex);
      debug_printf("%s: failed to create buffer\n", __FUNCTION__);
      if(!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump_locked(mgr);
      pipe_mutex_unlock(mgr->mutex);
#endif
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->reference));
   assert(pb_check_alignment(real_desc.alignment, buf->buffer->alignment));
   assert(pb_check_usage(real_desc.usage, buf->buffer->usage));
   assert(buf->buffer->size >= real_size);

   pipe_reference_init(&buf->base.reference, 1);
   buf->base.alignment = desc->alignment;
   buf->base.usage = desc->usage;
   buf->base.size = size;

   buf->base.vtbl = &pb_debug_buffer_vtbl;
   buf->mgr = mgr;

   buf->underflow_size = mgr->underflow_size;
   buf->overflow_size = buf->buffer->size - buf->underflow_size - size;

   debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);

   pb_debug_buffer_fill(buf);

   pipe_mutex_init(buf->mutex);

   pipe_mutex_lock(mgr->mutex);
   LIST_ADDTAIL(&buf->head, &mgr->list);
   pipe_mutex_unlock(mgr->mutex);

   return &buf->base;
}


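/**
 * Flush is forwarded straight to the provider.
 */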
static void
pb_debug_manager_flush(struct pb_manager *_mgr)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}


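/**
 * Destroy the manager.  Any buffers still alive at this point are leaks,
 * so dump them (with their creation backtraces) before tearing down the
 * provider.
 */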
static void
pb_debug_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);

   pipe_mutex_lock(mgr->mutex);
   if(!LIST_IS_EMPTY(&mgr->list)) {
      debug_printf("%s: unfreed buffers\n", __FUNCTION__);
      pb_debug_manager_dump_locked(mgr);
   }
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(mgr->mutex);
   mgr->provider->destroy(mgr->provider);
   FREE(mgr);
}


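/**
 * Create a debug buffer manager that wraps the given provider, padding
 * every allocation with underflow_size bytes before and at least
 * overflow_size bytes after the requested size.
 *
 * A minimal usage sketch (hypothetical sizes and provider; a real driver
 * would wrap whatever pb_manager it already uses):
 *
 *    struct pb_manager *provider = ...;  // e.g. the winsys buffer manager
 *    struct pb_manager *debug_mgr;
 *
 *    // Guard each buffer with 4096 bytes of slop on either side.
 *    debug_mgr = pb_debug_manager_create(provider, 4096, 4096);
 *    if (!debug_mgr)
 *       return NULL;
 *
 *    // Allocate through debug_mgr exactly as through the provider;
 *    // out-of-bounds writes are reported when buffers are mapped,
 *    // unmapped, validated or destroyed.
 */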
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size)
{
   struct pb_debug_manager *mgr;

   if(!provider)
      return NULL;

   mgr = CALLOC_STRUCT(pb_debug_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_debug_manager_destroy;
   mgr->base.create_buffer = pb_debug_manager_create_buffer;
   mgr->base.flush = pb_debug_manager_flush;
   mgr->provider = provider;
   mgr->underflow_size = underflow_size;
   mgr->overflow_size = overflow_size;

   pipe_mutex_init(mgr->mutex);
   LIST_INITHEAD(&mgr->list);

   return &mgr->base;
}


#else /* !DEBUG */


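/**
 * On non-debug builds the wrapper compiles out entirely and the provider
 * is returned unchanged.
 */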
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size)
{
   return provider;
}


#endif /* !DEBUG */