//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "function_list.h"
#include "test_functions.h"
#include "utility.h"

#include <cstring>

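// Generates and builds the OpenCL kernel that tests a one-argument float
// function for the given vector size. For example, with sizeNames[vectorSize]
// == "4" and name == "sin", the concatenated source is roughly:
//
//   __kernel void math_kernel4( __global float4* out, __global float4* in )
//   {
//       size_t i = get_global_id(0);
//       out[i] = sin( in[i] );
//   }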
static int BuildKernel(const char *name, int vectorSize, cl_uint kernel_count,
                       cl_kernel *k, cl_program *p, bool relaxedMode)
{
    const char *c[] = { "__kernel void math_kernel",
                        sizeNames[vectorSize],
                        "( __global float",
                        sizeNames[vectorSize],
                        "* out, __global float",
                        sizeNames[vectorSize],
                        "* in )\n"
                        "{\n"
                        "   size_t i = get_global_id(0);\n"
                        "   out[i] = ",
                        name,
                        "( in[i] );\n"
                        "}\n" };

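    // Size-3 vectors need a dedicated kernel: float3 data is loaded and stored
    // with vload3/vstore3, and the last work-item handles the tail where the
    // buffer does not hold a whole number of float3 elements.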
    const char *c3[] = {
        "__kernel void math_kernel",
        sizeNames[vectorSize],
        "( __global float* out, __global float* in)\n"
        "{\n"
        "   size_t i = get_global_id(0);\n"
        "   if( i + 1 < get_global_size(0) )\n"
        "   {\n"
        "       float3 f0 = vload3( 0, in + 3 * i );\n"
        "       f0 = ",
        name,
        "( f0 );\n"
        "       vstore3( f0, 0, out + 3*i );\n"
        "   }\n"
        "   else\n"
        "   {\n"
        "       size_t parity = i & 1;   // Figure out how many elements are "
        "left over after BUFFER_SIZE % (3*sizeof(float)). Assume power of two "
        "buffer size \n"
        "       float3 f0;\n"
        "       switch( parity )\n"
        "       {\n"
        "           case 1:\n"
        "               f0 = (float3)( in[3*i], NAN, NAN ); \n"
        "               break;\n"
        "           case 0:\n"
        "               f0 = (float3)( in[3*i], in[3*i+1], NAN ); \n"
        "               break;\n"
        "       }\n"
        "       f0 = ",
        name,
        "( f0 );\n"
        "       switch( parity )\n"
        "       {\n"
        "           case 0:\n"
        "               out[3*i+1] = f0.y; \n"
        "               // fall through\n"
        "           case 1:\n"
        "               out[3*i] = f0.x; \n"
        "               break;\n"
        "       }\n"
        "   }\n"
        "}\n"
    };

    const char **kern = c;
    size_t kernSize = sizeof(c) / sizeof(c[0]);

    if (sizeValues[vectorSize] == 3)
    {
        kern = c3;
        kernSize = sizeof(c3) / sizeof(c3[0]);
    }

    char testName[32];
    snprintf(testName, sizeof(testName) - 1, "math_kernel%s",
             sizeNames[vectorSize]);

    return MakeKernels(kern, (cl_uint)kernSize, testName, kernel_count, k, p,
                       relaxedMode);
}

typedef struct BuildKernelInfo
{
    cl_uint offset; // the first vector size to build
    cl_uint kernel_count;
    cl_kernel **kernels;
    cl_program *programs;
    const char *nameInCode;
    bool relaxedMode; // Whether to build with -cl-fast-relaxed-math.
} BuildKernelInfo;

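// Thread pool callback: builds the kernels for one vector size (offset +
// job_id), filling the corresponding entries of info->kernels and
// info->programs.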
static cl_int BuildKernelFn(cl_uint job_id, cl_uint thread_id UNUSED, void *p)
{
    BuildKernelInfo *info = (BuildKernelInfo *)p;
    cl_uint i = info->offset + job_id;
    return BuildKernel(info->nameInCode, i, info->kernel_count,
                       info->kernels[i], info->programs + i, info->relaxedMode);
}

// Thread specific data for a worker thread
typedef struct ThreadInfo
{
    cl_mem inBuf; // input buffer for the thread
    cl_mem outBuf[VECTOR_SIZE_COUNT]; // output buffers for the thread
    float maxError; // max error value. Init to 0.
    double maxErrorValue; // position of the max error value. Init to 0.
    cl_command_queue tQueue; // per thread command queue to improve performance
} ThreadInfo;

typedef struct TestInfo
{
    size_t subBufferSize; // Size of the sub-buffer in elements
    const Func *f; // A pointer to the function info
    cl_program programs[VECTOR_SIZE_COUNT]; // programs for various vector sizes
    cl_kernel
        *k[VECTOR_SIZE_COUNT]; // arrays of thread-specific kernels for each
                               // worker thread: k[vector_size][thread_id]
    ThreadInfo *
        tinfo; // An array of thread specific information for each worker thread
    cl_uint threadCount; // Number of worker threads
    cl_uint jobCount; // Number of jobs
    cl_uint step; // step between each chunk and the next.
    cl_uint scale; // stride between individual test values
    float ulps; // max_allowed ulps
    int ftz; // non-zero if running in flush to zero mode

    int isRangeLimited; // 1 if the function is only to be evaluated over a
                        // range
    float half_sin_cos_tan_limit;
    bool relaxedMode; // True if test is running in relaxed mode, false
                      // otherwise.
} TestInfo;

static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data);

int TestFunc_Float_Float(const Func *f, MTdata d, bool relaxedMode)
{
    TestInfo test_info;
    cl_int error;
    float maxError = 0.0f;
    double maxErrorVal = 0.0;
    int skipTestingRelaxed = (relaxedMode && strcmp(f->name, "tan") == 0);

    logFunctionInfo(f->name, sizeof(cl_float), relaxedMode);

    // Init test_info
    memset(&test_info, 0, sizeof(test_info));
    test_info.threadCount = GetThreadCount();
    test_info.subBufferSize = BUFFER_SIZE
        / (sizeof(cl_float) * RoundUpToNextPowerOfTwo(test_info.threadCount));
    test_info.scale = getTestScale(sizeof(cl_float));

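    // The 32-bit input space is covered in chunks: each job tests
    // subBufferSize values spaced 'scale' apart, so consecutive jobs start
    // 'step' = subBufferSize * scale bit patterns apart, and jobCount jobs
    // cover all 2^32 patterns (a single job if the step overflows 32 bits).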
    test_info.step = (cl_uint)test_info.subBufferSize * test_info.scale;
    if (test_info.step / test_info.subBufferSize != test_info.scale)
    {
        // there was overflow
        test_info.jobCount = 1;
    }
    else
    {
        test_info.jobCount = (cl_uint)((1ULL << 32) / test_info.step);
    }

    test_info.f = f;
    test_info.ulps = gIsEmbedded ? f->float_embedded_ulps : f->float_ulps;
    test_info.ftz =
        f->ftz || gForceFTZ || 0 == (CL_FP_DENORM & gFloatCapabilities);
    test_info.relaxedMode = relaxedMode;
    // cl_kernels aren't thread safe, so we make one for each vector size for
    // every thread
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        size_t array_size = test_info.threadCount * sizeof(cl_kernel);
        test_info.k[i] = (cl_kernel *)malloc(array_size);
        if (NULL == test_info.k[i])
        {
            vlog_error("Error: Unable to allocate storage for kernels!\n");
            error = CL_OUT_OF_HOST_MEMORY;
            goto exit;
        }
        memset(test_info.k[i], 0, array_size);
    }
    test_info.tinfo =
        (ThreadInfo *)malloc(test_info.threadCount * sizeof(*test_info.tinfo));
    if (NULL == test_info.tinfo)
    {
        vlog_error(
            "Error: Unable to allocate storage for thread specific data.\n");
        error = CL_OUT_OF_HOST_MEMORY;
        goto exit;
    }
    memset(test_info.tinfo, 0,
           test_info.threadCount * sizeof(*test_info.tinfo));
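    // Carve out a per-thread region of the global input/output buffers and
    // create a per-thread command queue.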
    for (cl_uint i = 0; i < test_info.threadCount; i++)
    {
        cl_buffer_region region = {
            i * test_info.subBufferSize * sizeof(cl_float),
            test_info.subBufferSize * sizeof(cl_float)
        };
        test_info.tinfo[i].inBuf =
            clCreateSubBuffer(gInBuffer, CL_MEM_READ_ONLY,
                              CL_BUFFER_CREATE_TYPE_REGION, &region, &error);
        if (error || NULL == test_info.tinfo[i].inBuf)
        {
            vlog_error("Error: Unable to create sub-buffer of gInBuffer for "
                       "region {%zd, %zd}\n",
                       region.origin, region.size);
            goto exit;
        }

        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            test_info.tinfo[i].outBuf[j] = clCreateSubBuffer(
                gOutBuffer[j], CL_MEM_WRITE_ONLY, CL_BUFFER_CREATE_TYPE_REGION,
                &region, &error);
            if (error || NULL == test_info.tinfo[i].outBuf[j])
            {
                vlog_error("Error: Unable to create sub-buffer of "
                           "gOutBuffer[%d] for region {%zd, %zd}\n",
                           (int)j, region.origin, region.size);
                goto exit;
            }
        }
        test_info.tinfo[i].tQueue =
            clCreateCommandQueue(gContext, gDevice, 0, &error);
        if (NULL == test_info.tinfo[i].tQueue || error)
        {
            vlog_error("clCreateCommandQueue failed. (%d)\n", error);
            goto exit;
        }
    }

    // Check for special cases for unary float
    test_info.isRangeLimited = 0;
    test_info.half_sin_cos_tan_limit = 0;
    if (0 == strcmp(f->name, "half_sin") || 0 == strcmp(f->name, "half_cos"))
    {
        test_info.isRangeLimited = 1;
        test_info.half_sin_cos_tan_limit = 1.0f
            + test_info.ulps
                * (FLT_EPSILON / 2.0f); // out of range results from finite
                                        // inputs must be in [-1,1]
    }
    else if (0 == strcmp(f->name, "half_tan"))
    {
        test_info.isRangeLimited = 1;
        test_info.half_sin_cos_tan_limit =
            INFINITY; // out of range results from finite inputs must be numeric
    }

    // Init the kernels
    {
        BuildKernelInfo build_info = {
            gMinVectorSizeIndex, test_info.threadCount, test_info.k,
            test_info.programs, f->nameInCode, relaxedMode
        };
        if ((error = ThreadPool_Do(BuildKernelFn,
                                   gMaxVectorSizeIndex - gMinVectorSizeIndex,
                                   &build_info)))
            goto exit;
    }

    // Run the kernels
    if (!gSkipCorrectnessTesting || skipTestingRelaxed)
    {
        error = ThreadPool_Do(Test, test_info.jobCount, &test_info);

        // Accumulate the arithmetic errors
        for (cl_uint i = 0; i < test_info.threadCount; i++)
        {
            if (test_info.tinfo[i].maxError > maxError)
            {
                maxError = test_info.tinfo[i].maxError;
                maxErrorVal = test_info.tinfo[i].maxErrorValue;
            }
        }

        if (error) goto exit;

        if (gWimpyMode)
            vlog("Wimp pass");
        else
            vlog("passed");

        if (skipTestingRelaxed)
        {
            vlog(" (rlx skip correctness testing)\n");
            goto exit;
        }

        vlog("\t%8.2f @ %a", maxError, maxErrorVal);
    }

    vlog("\n");

exit:
    // Release
    for (auto i = gMinVectorSizeIndex; i < gMaxVectorSizeIndex; i++)
    {
        clReleaseProgram(test_info.programs[i]);
        if (test_info.k[i])
        {
            for (cl_uint j = 0; j < test_info.threadCount; j++)
                clReleaseKernel(test_info.k[i][j]);

            free(test_info.k[i]);
        }
    }
    if (test_info.tinfo)
    {
        for (cl_uint i = 0; i < test_info.threadCount; i++)
        {
            clReleaseMemObject(test_info.tinfo[i].inBuf);
            for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
                clReleaseMemObject(test_info.tinfo[i].outBuf[j]);
            clReleaseCommandQueue(test_info.tinfo[i].tQueue);
        }

        free(test_info.tinfo);
    }

    return error;
}

static cl_int Test(cl_uint job_id, cl_uint thread_id, void *data)
{
    const TestInfo *job = (const TestInfo *)data;
    size_t buffer_elements = job->subBufferSize;
    size_t buffer_size = buffer_elements * sizeof(cl_float);
    cl_uint scale = job->scale;
    cl_uint base = job_id * (cl_uint)job->step;
    ThreadInfo *tinfo = job->tinfo + thread_id;
    fptr func = job->f->func;
    const char *fname = job->f->name;
    bool relaxedMode = job->relaxedMode;
    float ulps = getAllowedUlpError(job->f, relaxedMode);
    if (relaxedMode)
    {
        func = job->f->rfunc;
    }

    cl_int error;

    int isRangeLimited = job->isRangeLimited;
    float half_sin_cos_tan_limit = job->half_sin_cos_tan_limit;
    int ftz = job->ftz;

    // start the map of the output arrays
    cl_event e[VECTOR_SIZE_COUNT];
    cl_uint *out[VECTOR_SIZE_COUNT];
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        out[j] = (cl_uint *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], CL_FALSE, CL_MAP_WRITE, 0,
            buffer_size, 0, NULL, e + j, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush failed\n");

    // Write the new values to the input array
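    // Inputs are generated as successive 32-bit patterns (base + j * scale)
    // reinterpreted as floats, so the sweep covers float encodings directly
    // rather than mathematically chosen values.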
    cl_uint *p = (cl_uint *)gIn + thread_id * buffer_elements;
    for (size_t j = 0; j < buffer_elements; j++)
    {
        p[j] = base + j * scale;
        if (relaxedMode)
        {
            float p_j = *(float *)&p[j];
            if (strcmp(fname, "sin") == 0
                || strcmp(fname, "cos")
                    == 0) // the domain of the function is [-pi,pi]
            {
                if (fabs(p_j) > M_PI) ((float *)p)[j] = NAN;
            }

            if (strcmp(fname, "reciprocal") == 0)
            {
                const float l_limit = HEX_FLT(+, 1, 0, -, 126);
                const float u_limit = HEX_FLT(+, 1, 0, +, 126);

                if (fabs(p_j) < l_limit
                    || fabs(p_j) > u_limit) // the domain of the function is
                                            // [2^-126,2^126]
                    ((float *)p)[j] = NAN;
            }
        }
    }

    if ((error = clEnqueueWriteBuffer(tinfo->tQueue, tinfo->inBuf, CL_FALSE, 0,
                                      buffer_size, p, 0, NULL, NULL)))
    {
        vlog_error("Error: clEnqueueWriteBuffer failed! err: %d\n", error);
        return error;
    }

    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        // Wait for the map to finish
        if ((error = clWaitForEvents(1, e + j)))
        {
            vlog_error("Error: clWaitForEvents failed! err: %d\n", error);
            return error;
        }
        if ((error = clReleaseEvent(e[j])))
        {
            vlog_error("Error: clReleaseEvent failed! err: %d\n", error);
            return error;
        }

        // Fill the result buffer with garbage, so that old results don't carry
        // over
        uint32_t pattern = 0xffffdead;
        memset_pattern4(out[j], &pattern, buffer_size);
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject failed! err: %d\n",
                       error);
            return error;
        }

        // run the kernel
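        // One work-item per vector; round up so a trailing partial vector is
        // still processed.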
        size_t vectorCount =
            (buffer_elements + sizeValues[j] - 1) / sizeValues[j];
        cl_kernel kernel = job->k[j][thread_id]; // each worker thread has its
                                                 // own copy of the cl_kernel
        cl_program program = job->programs[j];

        if ((error = clSetKernelArg(kernel, 0, sizeof(tinfo->outBuf[j]),
                                    &tinfo->outBuf[j])))
        {
            LogBuildError(program);
            return error;
        }
        if ((error = clSetKernelArg(kernel, 1, sizeof(tinfo->inBuf),
                                    &tinfo->inBuf)))
        {
            LogBuildError(program);
            return error;
        }

        if ((error = clEnqueueNDRangeKernel(tinfo->tQueue, kernel, 1, NULL,
                                            &vectorCount, NULL, 0, NULL, NULL)))
        {
            vlog_error("FAILED -- could not execute kernel\n");
            return error;
        }
    }

    // Get that moving
    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush 2 failed\n");

    if (gSkipCorrectnessTesting) return CL_SUCCESS;

    // Calculate the correctly rounded reference result
    float *r = (float *)gOut_Ref + thread_id * buffer_elements;
    float *s = (float *)p;
    for (size_t j = 0; j < buffer_elements; j++) r[j] = (float)func.f_f(s[j]);

    // Read the data back -- no need to wait for the first N-1 buffers but wait
    // for the last buffer. This is an in order queue.
    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        cl_bool blocking = (j + 1 < gMaxVectorSizeIndex) ? CL_FALSE : CL_TRUE;
        out[j] = (cl_uint *)clEnqueueMapBuffer(
            tinfo->tQueue, tinfo->outBuf[j], blocking, CL_MAP_READ, 0,
            buffer_size, 0, NULL, NULL, &error);
        if (error || NULL == out[j])
        {
            vlog_error("Error: clEnqueueMapBuffer %d failed! err: %d\n", j,
                       error);
            return error;
        }
    }

    // Verify data
    uint32_t *t = (uint32_t *)r;
    for (size_t j = 0; j < buffer_elements; j++)
    {
        for (auto k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
        {
            uint32_t *q = out[k];

            // If we aren't getting the correctly rounded result
            if (t[j] != q[j])
            {
                float test = ((float *)q)[j];
                double correct = func.f_f(s[j]);
                float err = Ulp_Error(test, correct);
                float abs_error = Abs_Error(test, correct);
                int fail = 0;
                int use_abs_error = 0;

                // it is possible for the output to not match the reference
                // result but for Ulp_Error to be zero, for example -1.#QNAN
                // vs. 1.#QNAN. In such cases there is no failure
                if (err == 0.0f)
                {
                    fail = 0;
                }
                else if (relaxedMode)
                {
                    if (strcmp(fname, "sin") == 0 || strcmp(fname, "cos") == 0)
                    {
                        fail = !(fabsf(abs_error) <= ulps);
                        use_abs_error = 1;
                    }
                    if (strcmp(fname, "sinpi") == 0
                        || strcmp(fname, "cospi") == 0)
                    {
                        if (s[j] >= -1.0 && s[j] <= 1.0)
                        {
                            fail = !(fabsf(abs_error) <= ulps);
                            use_abs_error = 1;
                        }
                    }

                    if (strcmp(fname, "reciprocal") == 0)
                    {
                        fail = !(fabsf(err) <= ulps);
                    }

                    if (strcmp(fname, "exp") == 0 || strcmp(fname, "exp2") == 0)
                    {
                        float exp_error = ulps;

                        if (!gIsEmbedded)
                        {
                            exp_error += floor(fabs(2 * s[j]));
                        }

                        fail = !(fabsf(err) <= exp_error);
                        ulps = exp_error;
                    }
                    if (strcmp(fname, "tan") == 0)
                    {

                        if (!gFastRelaxedDerived)
                        {
                            fail = !(fabsf(err) <= ulps);
                        }
                        // Else fast math derived implementation does not
                        // require ULP verification
                    }
                    if (strcmp(fname, "exp10") == 0)
                    {
                        if (!gFastRelaxedDerived)
                        {
                            fail = !(fabsf(err) <= ulps);
                        }
                        // Else fast math derived implementation does not
                        // require ULP verification
                    }
                    if (strcmp(fname, "log") == 0 || strcmp(fname, "log2") == 0
                        || strcmp(fname, "log10") == 0)
                    {
                        if (s[j] >= 0.5 && s[j] <= 2)
                        {
                            fail = !(fabsf(abs_error) <= ulps);
                        }
                        else
                        {
                            ulps = gIsEmbedded ? job->f->float_embedded_ulps
                                               : job->f->float_ulps;
                            fail = !(fabsf(err) <= ulps);
                        }
                    }


                    // fast-relaxed implies finite-only
                    if (IsFloatInfinity(correct) || IsFloatNaN(correct)
                        || IsFloatInfinity(s[j]) || IsFloatNaN(s[j]))
                    {
                        fail = 0;
                        err = 0;
                    }
                }
                else
                {
                    fail = !(fabsf(err) <= ulps);
                }

                // half_sin/cos/tan are only valid between +-2**16, Inf, NaN
                if (isRangeLimited
                    && fabsf(s[j]) > MAKE_HEX_FLOAT(0x1.0p16f, 0x1L, 16)
                    && fabsf(s[j]) < INFINITY)
                {
                    if (fabsf(test) <= half_sin_cos_tan_limit)
                    {
                        err = 0;
                        fail = 0;
                    }
                }

                if (fail)
                {
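                    // In flush-to-zero mode a subnormal result or input may
                    // legitimately be flushed, so retry the comparison against
                    // the allowed alternatives before reporting a failure.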
                    if (ftz)
                    {
                        typedef int (*CheckForSubnormal)(
                            double, float); // If we are in fast relaxed math,
                                            // we have a different calculation
                                            // for the subnormal threshold.
                        CheckForSubnormal isFloatResultSubnormalPtr;

                        if (relaxedMode)
                        {
                            isFloatResultSubnormalPtr =
                                &IsFloatResultSubnormalAbsError;
                        }
                        else
                        {
                            isFloatResultSubnormalPtr = &IsFloatResultSubnormal;
                        }
                        // retry per section 6.5.3.2
                        if ((*isFloatResultSubnormalPtr)(correct, ulps))
                        {
                            fail = fail && (test != 0.0f);
                            if (!fail) err = 0.0f;
                        }

                        // retry per section 6.5.3.3
                        if (IsFloatSubnormal(s[j]))
                        {
                            double correct2 = func.f_f(0.0);
                            double correct3 = func.f_f(-0.0);
                            float err2;
                            float err3;
                            if (use_abs_error)
                            {
                                err2 = Abs_Error(test, correct2);
                                err3 = Abs_Error(test, correct3);
                            }
                            else
                            {
                                err2 = Ulp_Error(test, correct2);
                                err3 = Ulp_Error(test, correct3);
                            }
                            fail = fail
                                && ((!(fabsf(err2) <= ulps))
                                    && (!(fabsf(err3) <= ulps)));
                            if (fabsf(err2) < fabsf(err)) err = err2;
                            if (fabsf(err3) < fabsf(err)) err = err3;

                            // retry per section 6.5.3.4
                            if ((*isFloatResultSubnormalPtr)(correct2, ulps)
                                || (*isFloatResultSubnormalPtr)(correct3, ulps))
                            {
                                fail = fail && (test != 0.0f);
                                if (!fail) err = 0.0f;
                            }
                        }
                    }
                }
                if (fabsf(err) > tinfo->maxError)
                {
                    tinfo->maxError = fabsf(err);
                    tinfo->maxErrorValue = s[j];
                }
                if (fail)
                {
                    vlog_error("\nERROR: %s%s: %f ulp error at %a (0x%8.8x): "
                               "*%a vs. %a\n",
                               job->f->name, sizeNames[k], err, ((float *)s)[j],
                               ((uint32_t *)s)[j], ((float *)t)[j], test);
                    return -1;
                }
            }
        }
    }

    for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
    {
        if ((error = clEnqueueUnmapMemObject(tinfo->tQueue, tinfo->outBuf[j],
                                             out[j], 0, NULL, NULL)))
        {
            vlog_error("Error: clEnqueueUnmapMemObject %d failed 2! err: %d\n",
                       j, error);
            return error;
        }
    }

    if ((error = clFlush(tinfo->tQueue))) vlog("clFlush 3 failed\n");

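    // Report progress only occasionally: print verbose statistics or a dot
    // when the low 28 bits of 'base' are zero.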
    if (0 == (base & 0x0fffffff))
    {
        if (gVerboseBruteForce)
        {
            vlog("base:%14u step:%10u scale:%10u buf_elements:%10zd ulps:%5.3f "
                 "ThreadCount:%2u\n",
                 base, job->step, job->scale, buffer_elements, job->ulps,
                 job->threadCount);
        }
        else
        {
            vlog(".");
        }
        fflush(stdout);
    }

    return CL_SUCCESS;
}