//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "function_list.h"
#include "test_functions.h"
#include "utility.h"

#include <cstring>

static int BuildKernel(const char *name, int vectorSize, cl_uint kernel_count,
                       cl_kernel *k, cl_program *p, bool relaxedMode)
{
    const char *c[] = { "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n",
                        "__kernel void math_kernel",
                        sizeNames[vectorSize],
                        "( __global long",
                        sizeNames[vectorSize],
                        "* out, __global double",
                        sizeNames[vectorSize],
                        "* in )\n"
                        "{\n"
                        "   size_t i = get_global_id(0);\n"
                        "   out[i] = ",
                        name,
                        "( in[i] );\n"
                        "}\n" };

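    // For vector size 3 the buffer cannot be divided into whole vectors, so
    // the kernel below handles the one or two leftover scalar elements at the
    // end of the buffer separately.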
    const char *c3[] = {
        "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n",
        "__kernel void math_kernel",
        sizeNames[vectorSize],
        "( __global long* out, __global double* in)\n"
        "{\n"
        "   size_t i = get_global_id(0);\n"
        "   if( i + 1 < get_global_size(0) )\n"
        "   {\n"
        "       double3 d0 = vload3( 0, in + 3 * i );\n"
        "       long3 l0 = ",
        name,
        "( d0 );\n"
        "       vstore3( l0, 0, out + 3*i );\n"
        "   }\n"
        "   else\n"
        "   {\n"
58 " size_t parity = i & 1; // Figure out how many elements are "
59 "left over after BUFFER_SIZE % (3*sizeof(float)). Assume power of two "
60 "buffer size \n"
61 " double3 d0;\n"
62 " switch( parity )\n"
63 " {\n"
64 " case 1:\n"
65 " d0 = (double3)( in[3*i], NAN, NAN ); \n"
66 " break;\n"
67 " case 0:\n"
68 " d0 = (double3)( in[3*i], in[3*i+1], NAN ); \n"
69 " break;\n"
70 " }\n"
71 " long3 l0 = ",
72 name,
73 "( d0 );\n"
74 " switch( parity )\n"
75 " {\n"
76 " case 0:\n"
77 " out[3*i+1] = l0.y; \n"
78 " // fall through\n"
79 " case 1:\n"
80 " out[3*i] = l0.x; \n"
81 " break;\n"
82 " }\n"
83 " }\n"
84 "}\n"
85 };
86
87 const char **kern = c;
88 size_t kernSize = sizeof(c) / sizeof(c[0]);
89
90 if (sizeValues[vectorSize] == 3)
91 {
92 kern = c3;
93 kernSize = sizeof(c3) / sizeof(c3[0]);
94 }
95
96 char testName[32];
97 snprintf(testName, sizeof(testName) - 1, "math_kernel%s",
98 sizeNames[vectorSize]);
99
100 return MakeKernels(kern, (cl_uint)kernSize, testName, kernel_count, k, p,
101 relaxedMode);
102 }
103
typedef struct BuildKernelInfo
{
    cl_uint offset; // the first vector size to build
    cl_uint kernel_count;
    cl_kernel **kernels;
    cl_program *programs;
    const char *nameInCode;
    bool relaxedMode; // Whether to build with -cl-fast-relaxed-math.
} BuildKernelInfo;

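// Thread pool callback: builds the kernels for one vector size
// (info->offset + job_id) into that vector size's per-thread kernel array.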
static cl_int BuildKernelFn(cl_uint job_id, cl_uint thread_id UNUSED, void *p)
{
    BuildKernelInfo *info = (BuildKernelInfo *)p;
    cl_uint i = info->offset + job_id;
    return BuildKernel(info->nameInCode, i, info->kernel_count,
                       info->kernels[i], info->programs + i, info->relaxedMode);
}

// Thread specific data for a worker thread
typedef struct ThreadInfo
{
    cl_mem inBuf; // input buffer for the thread
    cl_mem outBuf[VECTOR_SIZE_COUNT]; // output buffers for the thread
    cl_command_queue tQueue; // per thread command queue to improve performance
} ThreadInfo;

typedef struct TestInfo
{
    size_t subBufferSize; // Size of the sub-buffer in elements
    const Func *f; // A pointer to the function info
    cl_program programs[VECTOR_SIZE_COUNT]; // programs for various vector sizes
    cl_kernel
        *k[VECTOR_SIZE_COUNT]; // arrays of thread-specific kernels for each
                               // worker thread: k[vector_size][thread_id]
    ThreadInfo *
        tinfo; // An array of thread specific information for each worker thread
    cl_uint threadCount; // Number of worker threads
    cl_uint jobCount; // Number of jobs
    cl_uint step; // step between each chunk and the next.
    cl_uint scale; // stride between individual test values
    int ftz; // non-zero if running in flush to zero mode

} TestInfo;

static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data);

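// Test driver for macro functions with the signature long foo(double)
// (e.g. the fp64 variants of isnan, isinf, signbit).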
int TestMacro_Int_Double(const Func *f, MTdata d, bool relaxedMode)
{
    TestInfo test_info;
    cl_int error;

    logFunctionInfo(f->name, sizeof(cl_double), relaxedMode);

    // Init test_info
    memset(&test_info, 0, sizeof(test_info));
    test_info.threadCount = GetThreadCount();
    test_info.subBufferSize = BUFFER_SIZE
        / (sizeof(cl_double) * RoundUpToNextPowerOfTwo(test_info.threadCount));
    test_info.scale = getTestScale(sizeof(cl_double));

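    // Work out how many jobs are needed to cover the full 2^32 input space,
    // guarding against overflow when the sub-buffer size is scaled.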
    test_info.step = (cl_uint)test_info.subBufferSize * test_info.scale;
    if (test_info.step / test_info.subBufferSize != test_info.scale)
    {
        // there was overflow
        test_info.jobCount = 1;
    }
    else
    {
        test_info.jobCount = (cl_uint)((1ULL << 32) / test_info.step);
    }

    test_info.f = f;
    test_info.ftz = f->ftz || gForceFTZ;

    // cl_kernels aren't thread safe, so we make one for each vector size for
    // every thread
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        size_t array_size = test_info.threadCount * sizeof(cl_kernel);
        test_info.k[i] = (cl_kernel *)malloc(array_size);
        if (NULL == test_info.k[i])
        {
            vlog_error("Error: Unable to allocate storage for kernels!\n");
            error = CL_OUT_OF_HOST_MEMORY;
            goto exit;
        }
        memset(test_info.k[i], 0, array_size);
    }
    test_info.tinfo =
        (ThreadInfo *)malloc(test_info.threadCount * sizeof(*test_info.tinfo));
    if (NULL == test_info.tinfo)
    {
        vlog_error(
            "Error: Unable to allocate storage for thread specific data.\n");
        error = CL_OUT_OF_HOST_MEMORY;
        goto exit;
    }
    memset(test_info.tinfo, 0,
           test_info.threadCount * sizeof(*test_info.tinfo));
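    // Carve a per-thread sub-buffer out of each global input/output buffer and
    // give every worker thread its own command queue.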
    for (cl_uint i = 0; i < test_info.threadCount; i++)
    {
        cl_buffer_region region = {
            i * test_info.subBufferSize * sizeof(cl_double),
            test_info.subBufferSize * sizeof(cl_double)
        };
        test_info.tinfo[i].inBuf =
            clCreateSubBuffer(gInBuffer, CL_MEM_READ_ONLY,
                              CL_BUFFER_CREATE_TYPE_REGION, &region, &error);
        if (error || NULL == test_info.tinfo[i].inBuf)
        {
            vlog_error("Error: Unable to create sub-buffer of gInBuffer for "
                       "region {%zd, %zd}\n",
                       region.origin, region.size);
            goto exit;
        }

        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            test_info.tinfo[i].outBuf[j] = clCreateSubBuffer(
                gOutBuffer[j], CL_MEM_WRITE_ONLY, CL_BUFFER_CREATE_TYPE_REGION,
                &region, &error);
            if (error || NULL == test_info.tinfo[i].outBuf[j])
            {
                vlog_error("Error: Unable to create sub-buffer of "
                           "gOutBuffer[%d] for region {%zd, %zd}\n",
                           (int)j, region.origin, region.size);
                goto exit;
            }
        }
        test_info.tinfo[i].tQueue =
            clCreateCommandQueue(gContext, gDevice, 0, &error);
        if (NULL == test_info.tinfo[i].tQueue || error)
        {
            vlog_error("clCreateCommandQueue failed. (%d)\n", error);
            goto exit;
        }
    }

    // Init the kernels
    {
        BuildKernelInfo build_info = {
            gMinVectorSizeIndex, test_info.threadCount, test_info.k,
            test_info.programs, f->nameInCode, relaxedMode
        };
        if ((error = ThreadPool_Do(BuildKernelFn,
                                   gMaxVectorSizeIndex - gMinVectorSizeIndex,
                                   &build_info)))
            goto exit;
    }

    // Run the kernels
    if (!gSkipCorrectnessTesting)
    {
        error = ThreadPool_Do(Test, test_info.jobCount, &test_info);

        if (error) goto exit;

        if (gWimpyMode)
            vlog("Wimp pass");
        else
            vlog("passed");
    }

    vlog("\n");

exit:
    // Release
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        clReleaseProgram(test_info.programs[i]);
        if (test_info.k[i])
        {
            for (cl_uint j = 0; j < test_info.threadCount; j++)
                clReleaseKernel(test_info.k[i][j]);

            free(test_info.k[i]);
        }
    }
    if (test_info.tinfo)
    {
        for (cl_uint i = 0; i < test_info.threadCount; i++)
        {
            clReleaseMemObject(test_info.tinfo[i].inBuf);
            for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
                clReleaseMemObject(test_info.tinfo[i].outBuf[j]);
            clReleaseCommandQueue(test_info.tinfo[i].tQueue);
        }

        free(test_info.tinfo);
    }

    return error;
}

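// Runs one test job on a worker thread: generates the input chunk for this
// job, launches the kernel for each vector size, computes the reference
// results on the host, and compares the two.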
static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
{
    const TestInfo *job = (const TestInfo *)data;
    size_t buffer_elements = job->subBufferSize;
    size_t buffer_size = buffer_elements * sizeof(cl_double);
    cl_uint scale = job->scale;
    cl_uint base = job_id * (cl_uint)job->step;
    ThreadInfo *tinfo = job->tinfo + thread_id;
    dptr dfunc = job->f->dfunc;
    int ftz = job->ftz;
    cl_int error;
    const char *name = job->f->name;

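    // Make sure the host FPU evaluates the reference computation at full
    // precision.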
    Force64BitFPUPrecision();

    // start the map of the output arrays
    cl_event e[VECTOR_SIZE_COUNT];
    cl_long *out[VECTOR_SIZE_COUNT];
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        out[j] = (cl_long *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], CL_FALSE, CL_MAP_WRITE, 0,
            buffer_size, 0, NULL, e + j, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush failed\n");

    // Write the new values to the input array
    cl_double *p = (cl_double *)gIn + thread_id * buffer_elements;
    for (size_t j = 0; j < buffer_elements; j++)
        p[j] = DoubleFromUInt32(base + j * scale);

    if ((error = clEnqueueWriteBuffer(tinfo->tQueue, tinfo->inBuf, CL_FALSE, 0,
                                      buffer_size, p, 0, NULL, NULL)))
    {
        vlog_error("Error: clEnqueueWriteBuffer failed! err: %d\n", error);
        return error;
    }

    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        // Wait for the map to finish
        if ((error = clWaitForEvents(1, e + j)))
        {
            vlog_error("Error: clWaitForEvents failed! err: %d\n", error);
            return error;
        }
        if ((error = clReleaseEvent(e[j])))
        {
            vlog_error("Error: clReleaseEvent failed! err: %d\n", error);
            return error;
        }

        // Fill the result buffer with garbage, so that old results don't carry
        // over
        uint32_t pattern = 0xffffdead;
        memset_pattern4(out[j], &pattern, buffer_size);
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject failed! err: %d\n",
                       error);
            return error;
        }

        // run the kernel
        size_t vectorCount =
            (buffer_elements + sizeValues[j] - 1) / sizeValues[j];
        cl_kernel kernel = job->k[j][thread_id]; // each worker thread has its
                                                 // own copy of the cl_kernel
        cl_program program = job->programs[j];

        if ((error = clSetKernelArg(kernel, 0, sizeof(tinfo->outBuf[j]),
                                    &tinfo->outBuf[j])))
        {
            LogBuildError(program);
            return error;
        }
        if ((error = clSetKernelArg(kernel, 1, sizeof(tinfo->inBuf),
                                    &tinfo->inBuf)))
        {
            LogBuildError(program);
            return error;
        }

        if ((error = clEnqueueNDRangeKernel(tinfo->tQueue, kernel, 1, NULL,
                                            &vectorCount, NULL, 0, NULL, NULL)))
        {
            vlog_error("FAILED -- could not execute kernel\n");
            return error;
        }
    }

    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush 2 failed\n");

    if (gSkipCorrectnessTesting) return CL_SUCCESS;

    // Calculate the correctly rounded reference result
    cl_long *r = (cl_long *)gOut_Ref + thread_id * buffer_elements;
    cl_double *s = (cl_double *)p;
    for (size_t j = 0; j < buffer_elements; j++) r[j] = dfunc.i_f(s[j]);

    // Read the data back -- no need to wait for the first N-1 buffers but wait
    // for the last buffer. This is an in order queue.
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        cl_bool blocking = (j + 1 < gMaxVectorSizeIndex) ? CL_FALSE : CL_TRUE;
        out[j] = (cl_long *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], blocking, CL_MAP_READ, 0,
            buffer_size, 0, NULL, NULL, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Verify data
    cl_long *t = (cl_long *)r;
    for (size_t j = 0; j < buffer_elements; j++)
    {
        cl_long *q = out[0];

        // If we aren't getting the correctly rounded result
        if (gMinVectorSizeIndex == 0 && t[j] != q[j])
        {
            // If flush-to-zero is in effect, a subnormal input may have been
            // treated as +0.0 or -0.0
            if (ftz)
            {
                if (IsDoubleSubnormal(s[j]))
                {
                    cl_long correct = dfunc.i_f(+0.0f);
                    cl_long correct2 = dfunc.i_f(-0.0f);
                    if (correct == q[j] || correct2 == q[j]) continue;
                }
            }

            cl_ulong err = t[j] - q[j];
            if (q[j] > t[j]) err = q[j] - t[j];
            vlog_error("\nERROR: %sD: %zd ulp error at %.13la: *%zd vs. %zd\n",
                       name, err, ((double *)gIn)[j], t[j], q[j]);
            return -1;
        }

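        // For vector sizes the OpenCL relational macros return -1 (all bits
        // set) for true rather than the scalar result 1, so the expected
        // value is the negated scalar reference.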
        for (auto k = MAX(1, gMinVectorSizeIndex); k < gMaxVectorSizeIndex;
             k++)
        {
            q = out[k];
            // If we aren't getting the correctly rounded result
            if (-t[j] != q[j])
            {
                if (ftz)
                {
                    if (IsDoubleSubnormal(s[j]))
                    {
                        int64_t correct = -dfunc.i_f(+0.0f);
                        int64_t correct2 = -dfunc.i_f(-0.0f);
                        if (correct == q[j] || correct2 == q[j]) continue;
                    }
                }

                cl_ulong err = -t[j] - q[j];
                if (q[j] > -t[j]) err = q[j] + t[j];
                vlog_error(
                    "\nERROR: %sD%s: %zd ulp error at %.13la: *%zd vs. %zd\n",
                    name, sizeNames[k], err, ((double *)gIn)[j], -t[j], q[j]);
                return -1;
            }
        }
    }

    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject %d failed 2! err: %d\n",
                       j, error);
            return error;
        }
    }

    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush 3 failed\n");


    if (0 == (base & 0x0fffffff))
    {
        if (gVerboseBruteForce)
        {
            vlog("base:%14u step:%10u scale:%10u buf_elements:%10zd "
                 "ThreadCount:%2u\n",
                 base, job->step, job->scale, buffer_elements,
                 job->threadCount);
        }
        else
        {
            vlog(".");
        }
        fflush(stdout);
    }

    return CL_SUCCESS;
}