//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "function_list.h"
#include "test_functions.h"
#include "utility.h"

#include <cstring>

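// Builds the test kernel for one vector size. The generic source (c) applies
// the function under test to each element; the 3-vector variant (c3) is
// special-cased with vload3/vstore3 plus a tail handler for the partial
// float3 at the end of the buffer.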
static int BuildKernel(const char *name, int vectorSize, cl_uint kernel_count,
                       cl_kernel *k, cl_program *p, bool relaxedMode)
{
    const char *c[] = { "__kernel void math_kernel",
                        sizeNames[vectorSize],
                        "( __global int",
                        sizeNames[vectorSize],
                        "* out, __global float",
                        sizeNames[vectorSize],
                        "* in )\n"
                        "{\n"
                        " size_t i = get_global_id(0);\n"
                        " out[i] = ",
                        name,
                        "( in[i] );\n"
                        "}\n" };

    const char *c3[] = {
        "__kernel void math_kernel",
        sizeNames[vectorSize],
        "( __global int* out, __global float* in)\n"
        "{\n"
        " size_t i = get_global_id(0);\n"
        " if( i + 1 < get_global_size(0) )\n"
        " {\n"
        " float3 f0 = vload3( 0, in + 3 * i );\n"
        " int3 i0 = ",
        name,
        "( f0 );\n"
        " vstore3( i0, 0, out + 3*i );\n"
        " }\n"
        " else\n"
        " {\n"
        " size_t parity = i & 1; // Figure out how many elements are "
        "left over after BUFFER_SIZE % (3*sizeof(float)). Assume power of two "
        "buffer size \n"
        " int3 i0;\n"
        " float3 f0;\n"
        " switch( parity )\n"
        " {\n"
        " case 1:\n"
        " f0 = (float3)( in[3*i], 0xdead, 0xdead ); \n"
        " break;\n"
        " case 0:\n"
        " f0 = (float3)( in[3*i], in[3*i+1], 0xdead ); \n"
        " break;\n"
        " }\n"
        " i0 = ",
        name,
        "( f0 );\n"
        " switch( parity )\n"
        " {\n"
        " case 0:\n"
        " out[3*i+1] = i0.y; \n"
        " // fall through\n"
        " case 1:\n"
        " out[3*i] = i0.x; \n"
        " break;\n"
        " }\n"
        " }\n"
        "}\n"
    };

    const char **kern = c;
    size_t kernSize = sizeof(c) / sizeof(c[0]);

    if (sizeValues[vectorSize] == 3)
    {
        kern = c3;
        kernSize = sizeof(c3) / sizeof(c3[0]);
    }

    char testName[32];
    snprintf(testName, sizeof(testName) - 1, "math_kernel%s",
             sizeNames[vectorSize]);

    return MakeKernels(kern, (cl_uint)kernSize, testName, kernel_count, k, p,
                       relaxedMode);
}

typedef struct BuildKernelInfo
{
    cl_uint offset; // the first vector size to build
    cl_uint kernel_count;
    cl_kernel **kernels;
    cl_program *programs;
    const char *nameInCode;
    bool relaxedMode; // Whether to build with -cl-fast-relaxed-math.
} BuildKernelInfo;

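// ThreadPool callback for building the kernels: each job builds one vector
// size (info->offset + job_id), producing kernel_count copies for the worker
// threads.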
static cl_int BuildKernelFn(cl_uint job_id, cl_uint thread_id UNUSED, void *p)
{
    BuildKernelInfo *info = (BuildKernelInfo *)p;
    cl_uint i = info->offset + job_id;
    return BuildKernel(info->nameInCode, i, info->kernel_count,
                       info->kernels[i], info->programs + i, info->relaxedMode);
}

// Thread specific data for a worker thread
typedef struct ThreadInfo
{
    cl_mem inBuf; // input buffer for the thread
    cl_mem outBuf[VECTOR_SIZE_COUNT]; // output buffers for the thread
    cl_command_queue tQueue; // per thread command queue to improve performance
} ThreadInfo;

typedef struct TestInfo
{
    size_t subBufferSize; // Size of the sub-buffer in elements
    const Func *f; // A pointer to the function info
    cl_program programs[VECTOR_SIZE_COUNT]; // programs for various vector sizes
    cl_kernel
        *k[VECTOR_SIZE_COUNT]; // arrays of thread-specific kernels for each
                               // worker thread: k[vector_size][thread_id]
    ThreadInfo *
        tinfo; // An array of thread specific information for each worker thread
    cl_uint threadCount; // Number of worker threads
    cl_uint jobCount; // Number of jobs
    cl_uint step; // step between each chunk and the next.
    cl_uint scale; // stride between individual test values
    int ftz; // non-zero if running in flush to zero mode

} TestInfo;

static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data);

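// Entry point for the int-returning macro/relational functions (e.g.
// signbit): sets up per-thread sub-buffers, command queues and kernels, then
// farms the actual testing out to Test() via the thread pool.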
int TestMacro_Int_Float(const Func *f, MTdata d, bool relaxedMode)
{
    TestInfo test_info;
    cl_int error;

    logFunctionInfo(f->name, sizeof(cl_float), relaxedMode);

    // Init test_info
    memset(&test_info, 0, sizeof(test_info));
    test_info.threadCount = GetThreadCount();
    test_info.subBufferSize = BUFFER_SIZE
        / (sizeof(cl_float) * RoundUpToNextPowerOfTwo(test_info.threadCount));
    test_info.scale = getTestScale(sizeof(cl_float));

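    // Each job covers step = subBufferSize * scale consecutive 32-bit input
    // patterns; enough jobs are scheduled to cover the full 2^32 range
    // (a scale > 1 thins the sweep, e.g. in wimpy mode).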
    test_info.step = (cl_uint)test_info.subBufferSize * test_info.scale;
    if (test_info.step / test_info.subBufferSize != test_info.scale)
    {
        // there was overflow
        test_info.jobCount = 1;
    }
    else
    {
        test_info.jobCount = (cl_uint)((1ULL << 32) / test_info.step);
    }

    test_info.f = f;
    test_info.ftz =
        f->ftz || gForceFTZ || 0 == (CL_FP_DENORM & gFloatCapabilities);

    // cl_kernels aren't thread safe, so we make one for each vector size for
    // every thread
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        size_t array_size = test_info.threadCount * sizeof(cl_kernel);
        test_info.k[i] = (cl_kernel *)malloc(array_size);
        if (NULL == test_info.k[i])
        {
            vlog_error("Error: Unable to allocate storage for kernels!\n");
            error = CL_OUT_OF_HOST_MEMORY;
            goto exit;
        }
        memset(test_info.k[i], 0, array_size);
    }
    test_info.tinfo =
        (ThreadInfo *)malloc(test_info.threadCount * sizeof(*test_info.tinfo));
    if (NULL == test_info.tinfo)
    {
        vlog_error(
            "Error: Unable to allocate storage for thread specific data.\n");
        error = CL_OUT_OF_HOST_MEMORY;
        goto exit;
    }
    memset(test_info.tinfo, 0,
           test_info.threadCount * sizeof(*test_info.tinfo));
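    // Carve the global input/output buffers into one sub-buffer per worker
    // thread so each thread can map and fill its own region independently.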
    for (cl_uint i = 0; i < test_info.threadCount; i++)
    {
        cl_buffer_region region = {
            i * test_info.subBufferSize * sizeof(cl_float),
            test_info.subBufferSize * sizeof(cl_float)
        };
        test_info.tinfo[i].inBuf =
            clCreateSubBuffer(gInBuffer, CL_MEM_READ_ONLY,
                              CL_BUFFER_CREATE_TYPE_REGION, &region, &error);
        if (error || NULL == test_info.tinfo[i].inBuf)
        {
            vlog_error("Error: Unable to create sub-buffer of gInBuffer for "
                       "region {%zd, %zd}\n",
                       region.origin, region.size);
            goto exit;
        }

        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            test_info.tinfo[i].outBuf[j] = clCreateSubBuffer(
                gOutBuffer[j], CL_MEM_WRITE_ONLY, CL_BUFFER_CREATE_TYPE_REGION,
                &region, &error);
            if (error || NULL == test_info.tinfo[i].outBuf[j])
            {
                vlog_error("Error: Unable to create sub-buffer of "
                           "gOutBuffer[%d] for region {%zd, %zd}\n",
                           (int)j, region.origin, region.size);
                goto exit;
            }
        }
        test_info.tinfo[i].tQueue =
            clCreateCommandQueue(gContext, gDevice, 0, &error);
        if (NULL == test_info.tinfo[i].tQueue || error)
        {
            vlog_error("clCreateCommandQueue failed. (%d)\n", error);
            goto exit;
        }
    }

    // Init the kernels
    {
        BuildKernelInfo build_info = {
            gMinVectorSizeIndex, test_info.threadCount, test_info.k,
            test_info.programs, f->nameInCode, relaxedMode
        };
        if ((error = ThreadPool_Do(BuildKernelFn,
                                   gMaxVectorSizeIndex - gMinVectorSizeIndex,
                                   &build_info)))
            goto exit;
    }

    // Run the kernels
    if (!gSkipCorrectnessTesting)
    {
        error = ThreadPool_Do(Test, test_info.jobCount, &test_info);

        if (error) goto exit;

        if (gWimpyMode)
            vlog("Wimp pass");
        else
            vlog("passed");
    }

    vlog("\n");

exit:
    // Release
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        clReleaseProgram(test_info.programs[i]);
        if (test_info.k[i])
        {
            for (cl_uint j = 0; j < test_info.threadCount; j++)
                clReleaseKernel(test_info.k[i][j]);

            free(test_info.k[i]);
        }
    }
    if (test_info.tinfo)
    {
        for (cl_uint i = 0; i < test_info.threadCount; i++)
        {
            clReleaseMemObject(test_info.tinfo[i].inBuf);
            for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
                clReleaseMemObject(test_info.tinfo[i].outBuf[j]);
            clReleaseCommandQueue(test_info.tinfo[i].tQueue);
        }

        free(test_info.tinfo);
    }

    return error;
}

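// One Test job: map the output sub-buffers, generate this thread's chunk of
// input bit patterns, launch the kernel for every vector size, compute the
// reference results on the host, then compare.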
static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
{
    const TestInfo *job = (const TestInfo *)data;
    size_t buffer_elements = job->subBufferSize;
    size_t buffer_size = buffer_elements * sizeof(cl_float);
    cl_uint scale = job->scale;
    cl_uint base = job_id * (cl_uint)job->step;
    ThreadInfo *tinfo = job->tinfo + thread_id;
    fptr func = job->f->func;
    int ftz = job->ftz;
    cl_int error = CL_SUCCESS;
    cl_int ret = CL_SUCCESS;
    const char *name = job->f->name;

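    // signbit's reference function is stored under a different member of the
    // function pointer union than the other functions tested here, so
    // ref_func() has to select the matching one.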
    int signbit_test = 0;
    if (!strcmp(name, "signbit")) signbit_test = 1;

#define ref_func(s) (signbit_test ? func.i_f_f(s) : func.i_f(s))

    // start the map of the output arrays
    cl_event e[VECTOR_SIZE_COUNT];
    cl_int *out[VECTOR_SIZE_COUNT];
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        out[j] = (cl_int *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], CL_FALSE, CL_MAP_WRITE, 0,
            buffer_size, 0, NULL, e + j, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush failed\n");

    // Init input array
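    // The values are raw 32-bit patterns; both the kernel and the host
    // reference loop below reinterpret them as floats.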
    cl_uint *p = (cl_uint *)gIn + thread_id * buffer_elements;
    for (size_t j = 0; j < buffer_elements; j++) p[j] = base + j * scale;

    if ((error = clEnqueueWriteBuffer(tinfo->tQueue, tinfo->inBuf, CL_FALSE, 0,
                                      buffer_size, p, 0, NULL, NULL)))
    {
        vlog_error("Error: clEnqueueWriteBuffer failed! err: %d\n", error);
        return error;
    }

    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        // Wait for the map to finish
        if ((error = clWaitForEvents(1, e + j)))
        {
            vlog_error("Error: clWaitForEvents failed! err: %d\n", error);
            return error;
        }
        if ((error = clReleaseEvent(e[j])))
        {
            vlog_error("Error: clReleaseEvent failed! err: %d\n", error);
            return error;
        }

        // Fill the result buffer with garbage, so that old results don't carry
        // over
        uint32_t pattern = 0xffffdead;
        memset_pattern4(out[j], &pattern, buffer_size);
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject failed! err: %d\n",
                       error);
            return error;
        }

        // run the kernel
        size_t vectorCount =
            (buffer_elements + sizeValues[j] - 1) / sizeValues[j];
        cl_kernel kernel = job->k[j][thread_id]; // each worker thread has its
                                                 // own copy of the cl_kernel
        cl_program program = job->programs[j];

        if ((error = clSetKernelArg(kernel, 0, sizeof(tinfo->outBuf[j]),
                                    &tinfo->outBuf[j])))
        {
            LogBuildError(program);
            return error;
        }
        if ((error = clSetKernelArg(kernel, 1, sizeof(tinfo->inBuf),
                                    &tinfo->inBuf)))
        {
            LogBuildError(program);
            return error;
        }

        if ((error = clEnqueueNDRangeKernel(tinfo->tQueue, kernel, 1, NULL,
                                            &vectorCount, NULL, 0, NULL, NULL)))
        {
            vlog_error("FAILED -- could not execute kernel\n");
            return error;
        }
    }

    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush 2 failed\n");

    if (gSkipCorrectnessTesting) return CL_SUCCESS;

    // Calculate the correctly rounded reference result
    cl_int *r = (cl_int *)gOut_Ref + thread_id * buffer_elements;
    float *s = (float *)p;
    for (size_t j = 0; j < buffer_elements; j++) r[j] = ref_func(s[j]);

    // Read the data back -- no need to wait for the first N-1 buffers but wait
    // for the last buffer. This is an in order queue.
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        cl_bool blocking = (j + 1 < gMaxVectorSizeIndex) ? CL_FALSE : CL_TRUE;
        out[j] = (cl_int *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], blocking, CL_MAP_READ, 0,
            buffer_size, 0, NULL, NULL, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Verify data
    cl_int *t = (cl_int *)r;
    for (size_t j = 0; j < buffer_elements; j++)
    {
        for (auto k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
        {
            cl_int *q = out[0];

            // If we aren't getting the correctly rounded result
            if (gMinVectorSizeIndex == 0 && t[j] != q[j])
            {
                // In flush-to-zero mode a subnormal input may have been
                // flushed; accept the result the reference gives for +/-0.0
                if (ftz)
                {
                    if (IsFloatSubnormal(s[j]))
                    {
                        int correct = ref_func(+0.0f);
                        int correct2 = ref_func(-0.0f);
                        if (correct == q[j] || correct2 == q[j]) continue;
                    }
                }

                uint32_t err = t[j] - q[j];
                if (q[j] > t[j]) err = q[j] - t[j];
                vlog_error("\nERROR: %s: %d ulp error at %a: *%d vs. %d\n",
                           name, err, ((float *)s)[j], t[j], q[j]);
                error = -1;
                goto exit;
            }


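            // The vector forms of these functions return -1 (all bits set)
            // for true rather than 1, so the expected value is -t[j].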
            for (auto k = MAX(1, gMinVectorSizeIndex); k < gMaxVectorSizeIndex;
                 k++)
            {
                q = out[k];
                // If we aren't getting the correctly rounded result
                if (-t[j] != q[j])
                {
                    if (ftz)
                    {
                        if (IsFloatSubnormal(s[j]))
                        {
                            int correct = -ref_func(+0.0f);
                            int correct2 = -ref_func(-0.0f);
                            if (correct == q[j] || correct2 == q[j]) continue;
                        }
                    }

                    uint32_t err = -t[j] - q[j];
                    if (q[j] > -t[j]) err = q[j] + t[j];
                    vlog_error(
                        "\nERROR: %s%s: %d ulp error at %a: *%d vs. %d\n", name,
                        sizeNames[k], err, ((float *)s)[j], -t[j], q[j]);
                    error = -1;
                    goto exit;
                }
            }
        }
    }

exit:
    ret = error;
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject %d failed 2! err: %d\n",
                       j, error);
            return error;
        }
    }

    if ((error = clFlush(tinfo->tQueue)))
    {
        vlog("clFlush 3 failed\n");
        return error;
    }


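    // Emit progress output whenever base crosses a multiple of 2^28.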
    if (0 == (base & 0x0fffffff))
    {
        if (gVerboseBruteForce)
        {
            vlog("base:%14u step:%10u scale:%10u buf_elements:%10zd "
                 "ThreadCount:%2u\n",
                 base, job->step, job->scale, buffer_elements,
                 job->threadCount);
        }
        else
        {
            vlog(".");
        }
        fflush(stdout);
    }

    return ret;
}