1 //
2 // Copyright (c) 2017 The Khronos Group Inc.
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 // http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //
16
#include "function_list.h"
#include "test_functions.h"
#include "utility.h"

#include <cinttypes>
#include <cstring>
22
BuildKernel(const char * name,int vectorSize,cl_kernel * k,cl_program * p,bool relaxedMode)23 static int BuildKernel(const char *name, int vectorSize, cl_kernel *k,
24 cl_program *p, bool relaxedMode)
25 {
26 const char *c[] = { "__kernel void math_kernel",
27 sizeNames[vectorSize],
28 "( __global float",
29 sizeNames[vectorSize],
30 "* out, __global float",
31 sizeNames[vectorSize],
32 "* out2, __global float",
33 sizeNames[vectorSize],
34 "* in )\n"
35 "{\n"
36 " size_t i = get_global_id(0);\n"
37 " out[i] = ",
38 name,
39 "( in[i], out2 + i );\n"
40 "}\n" };
41
42 const char *c3[] = {
43 "__kernel void math_kernel",
44 sizeNames[vectorSize],
45 "( __global float* out, __global float* out2, __global float* in)\n"
46 "{\n"
47 " size_t i = get_global_id(0);\n"
48 " if( i + 1 < get_global_size(0) )\n"
49 " {\n"
50 " float3 f0 = vload3( 0, in + 3 * i );\n"
51 " float3 iout = NAN;\n"
52 " f0 = ",
53 name,
54 "( f0, &iout );\n"
55 " vstore3( f0, 0, out + 3*i );\n"
56 " vstore3( iout, 0, out2 + 3*i );\n"
57 " }\n"
58 " else\n"
59 " {\n"
60 " size_t parity = i & 1; // Figure out how many elements are "
61 "left over after BUFFER_SIZE % (3*sizeof(float)). Assume power of two "
62 "buffer size \n"
63 " float3 iout = NAN;\n"
64 " float3 f0;\n"
65 " switch( parity )\n"
66 " {\n"
67 " case 1:\n"
68 " f0 = (float3)( in[3*i], NAN, NAN ); \n"
69 " break;\n"
70 " case 0:\n"
71 " f0 = (float3)( in[3*i], in[3*i+1], NAN ); \n"
72 " break;\n"
73 " }\n"
74 " f0 = ",
75 name,
76 "( f0, &iout );\n"
77 " switch( parity )\n"
78 " {\n"
79 " case 0:\n"
80 " out[3*i+1] = f0.y; \n"
81 " out2[3*i+1] = iout.y; \n"
82 " // fall through\n"
83 " case 1:\n"
84 " out[3*i] = f0.x; \n"
85 " out2[3*i] = iout.x; \n"
86 " break;\n"
87 " }\n"
88 " }\n"
89 "}\n"
90 };
91
92 const char **kern = c;
93 size_t kernSize = sizeof(c) / sizeof(c[0]);
94
95 if (sizeValues[vectorSize] == 3)
96 {
97 kern = c3;
98 kernSize = sizeof(c3) / sizeof(c3[0]);
99 }
100
101 char testName[32];
102 snprintf(testName, sizeof(testName) - 1, "math_kernel%s",
103 sizeNames[vectorSize]);
104
105 return MakeKernel(kern, (cl_uint)kernSize, testName, k, p, relaxedMode);
106 }
107
// Job description shared by the per-vector-size kernel build jobs that
// ThreadPool_Do dispatches to BuildKernelFn.
typedef struct BuildKernelInfo
{
    cl_uint offset; // the first vector size to build
    cl_kernel *kernels; // output array of kernels, indexed by vector size
    cl_program *programs; // output array of programs, indexed by vector size
    const char *nameInCode; // name of the function under test in OpenCL C
    bool relaxedMode; // Whether to build with -cl-fast-relaxed-math.
} BuildKernelInfo;
116
BuildKernelFn(cl_uint job_id,cl_uint thread_id UNUSED,void * p)117 static cl_int BuildKernelFn(cl_uint job_id, cl_uint thread_id UNUSED, void *p)
118 {
119 BuildKernelInfo *info = (BuildKernelInfo *)p;
120 cl_uint i = info->offset + job_id;
121 return BuildKernel(info->nameInCode, i, info->kernels + i,
122 info->programs + i, info->relaxedMode);
123 }
124
TestFunc_Float2_Float(const Func * f,MTdata d,bool relaxedMode)125 int TestFunc_Float2_Float(const Func *f, MTdata d, bool relaxedMode)
126 {
127 int error;
128 cl_program programs[VECTOR_SIZE_COUNT];
129 cl_kernel kernels[VECTOR_SIZE_COUNT];
130 float maxError0 = 0.0f;
131 float maxError1 = 0.0f;
132 int ftz = f->ftz || gForceFTZ || 0 == (CL_FP_DENORM & gFloatCapabilities);
133 float maxErrorVal0 = 0.0f;
134 float maxErrorVal1 = 0.0f;
135 uint64_t step = getTestStep(sizeof(float), BUFFER_SIZE);
136 int scale = (int)((1ULL << 32) / (16 * BUFFER_SIZE / sizeof(float)) + 1);
137 cl_uchar overflow[BUFFER_SIZE / sizeof(float)];
138 int isFract = 0 == strcmp("fract", f->nameInCode);
139 int skipNanInf = isFract && !gInfNanSupport;
140
141 logFunctionInfo(f->name, sizeof(cl_float), relaxedMode);
142
143 float float_ulps = getAllowedUlpError(f, relaxedMode);
144 // Init the kernels
145 {
146 BuildKernelInfo build_info = { gMinVectorSizeIndex, kernels, programs,
147 f->nameInCode, relaxedMode };
148 if ((error = ThreadPool_Do(BuildKernelFn,
149 gMaxVectorSizeIndex - gMinVectorSizeIndex,
150 &build_info)))
151 return error;
152 }
153
154 for (uint64_t i = 0; i < (1ULL << 32); i += step)
155 {
156 // Init input array
157 uint32_t *p = (uint32_t *)gIn;
158 if (gWimpyMode)
159 {
160 for (size_t j = 0; j < BUFFER_SIZE / sizeof(float); j++)
161 {
162 p[j] = (uint32_t)i + j * scale;
163 if (relaxedMode && strcmp(f->name, "sincos") == 0)
164 {
165 float pj = *(float *)&p[j];
166 if (fabs(pj) > M_PI) ((float *)p)[j] = NAN;
167 }
168 }
169 }
170 else
171 {
172 for (size_t j = 0; j < BUFFER_SIZE / sizeof(float); j++)
173 {
174 p[j] = (uint32_t)i + j;
175 if (relaxedMode && strcmp(f->name, "sincos") == 0)
176 {
177 float pj = *(float *)&p[j];
178 if (fabs(pj) > M_PI) ((float *)p)[j] = NAN;
179 }
180 }
181 }
182
183 if ((error = clEnqueueWriteBuffer(gQueue, gInBuffer, CL_FALSE, 0,
184 BUFFER_SIZE, gIn, 0, NULL, NULL)))
185 {
186 vlog_error("\n*** Error %d in clEnqueueWriteBuffer ***\n", error);
187 return error;
188 }
189
190 // write garbage into output arrays
191 for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
192 {
193 uint32_t pattern = 0xffffdead;
194 memset_pattern4(gOut[j], &pattern, BUFFER_SIZE);
195 if ((error =
196 clEnqueueWriteBuffer(gQueue, gOutBuffer[j], CL_FALSE, 0,
197 BUFFER_SIZE, gOut[j], 0, NULL, NULL)))
198 {
199 vlog_error("\n*** Error %d in clEnqueueWriteBuffer2(%d) ***\n",
200 error, j);
201 goto exit;
202 }
203
204 memset_pattern4(gOut2[j], &pattern, BUFFER_SIZE);
205 if ((error = clEnqueueWriteBuffer(gQueue, gOutBuffer2[j], CL_FALSE,
206 0, BUFFER_SIZE, gOut2[j], 0, NULL,
207 NULL)))
208 {
209 vlog_error("\n*** Error %d in clEnqueueWriteBuffer2b(%d) ***\n",
210 error, j);
211 goto exit;
212 }
213 }
214
215 // Run the kernels
216 for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
217 {
218 size_t vectorSize = sizeValues[j] * sizeof(cl_float);
219 size_t localCount = (BUFFER_SIZE + vectorSize - 1) / vectorSize;
220 if ((error = clSetKernelArg(kernels[j], 0, sizeof(gOutBuffer[j]),
221 &gOutBuffer[j])))
222 {
223 LogBuildError(programs[j]);
224 goto exit;
225 }
226 if ((error = clSetKernelArg(kernels[j], 1, sizeof(gOutBuffer2[j]),
227 &gOutBuffer2[j])))
228 {
229 LogBuildError(programs[j]);
230 goto exit;
231 }
232 if ((error = clSetKernelArg(kernels[j], 2, sizeof(gInBuffer),
233 &gInBuffer)))
234 {
235 LogBuildError(programs[j]);
236 goto exit;
237 }
238
239 if ((error =
240 clEnqueueNDRangeKernel(gQueue, kernels[j], 1, NULL,
241 &localCount, NULL, 0, NULL, NULL)))
242 {
243 vlog_error("FAILED -- could not execute kernel\n");
244 goto exit;
245 }
246 }
247
248 // Get that moving
249 if ((error = clFlush(gQueue))) vlog("clFlush failed\n");
250
251 FPU_mode_type oldMode;
252 RoundingMode oldRoundMode = kRoundToNearestEven;
253 if (isFract)
254 {
255 // Calculate the correctly rounded reference result
256 memset(&oldMode, 0, sizeof(oldMode));
257 if (ftz) ForceFTZ(&oldMode);
258
259 // Set the rounding mode to match the device
260 if (gIsInRTZMode)
261 oldRoundMode = set_round(kRoundTowardZero, kfloat);
262 }
263
264 // Calculate the correctly rounded reference result
265 float *r = (float *)gOut_Ref;
266 float *r2 = (float *)gOut_Ref2;
267 float *s = (float *)gIn;
268
269 if (skipNanInf)
270 {
271 for (size_t j = 0; j < BUFFER_SIZE / sizeof(float); j++)
272 {
273 double dd;
274 feclearexcept(FE_OVERFLOW);
275
276 if (relaxedMode)
277 r[j] = (float)f->rfunc.f_fpf(s[j], &dd);
278 else
279 r[j] = (float)f->func.f_fpf(s[j], &dd);
280
281 r2[j] = (float)dd;
282 overflow[j] =
283 FE_OVERFLOW == (FE_OVERFLOW & fetestexcept(FE_OVERFLOW));
284 }
285 }
286 else
287 {
288 for (size_t j = 0; j < BUFFER_SIZE / sizeof(float); j++)
289 {
290 double dd;
291 if (relaxedMode)
292 r[j] = (float)f->rfunc.f_fpf(s[j], &dd);
293 else
294 r[j] = (float)f->func.f_fpf(s[j], &dd);
295
296 r2[j] = (float)dd;
297 }
298 }
299
300 if (isFract && ftz) RestoreFPState(&oldMode);
301
302 // Read the data back
303 for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
304 {
305 if ((error =
306 clEnqueueReadBuffer(gQueue, gOutBuffer[j], CL_TRUE, 0,
307 BUFFER_SIZE, gOut[j], 0, NULL, NULL)))
308 {
309 vlog_error("ReadArray failed %d\n", error);
310 goto exit;
311 }
312 if ((error =
313 clEnqueueReadBuffer(gQueue, gOutBuffer2[j], CL_TRUE, 0,
314 BUFFER_SIZE, gOut2[j], 0, NULL, NULL)))
315 {
316 vlog_error("ReadArray2 failed %d\n", error);
317 goto exit;
318 }
319 }
320
321 if (gSkipCorrectnessTesting)
322 {
323 if (isFract && gIsInRTZMode) (void)set_round(oldRoundMode, kfloat);
324 break;
325 }
326
327 // Verify data
328 uint32_t *t = (uint32_t *)gOut_Ref;
329 uint32_t *t2 = (uint32_t *)gOut_Ref2;
330 for (size_t j = 0; j < BUFFER_SIZE / sizeof(float); j++)
331 {
332 for (auto k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
333 {
334 uint32_t *q = (uint32_t *)gOut[k];
335 uint32_t *q2 = (uint32_t *)gOut2[k];
336
337 // If we aren't getting the correctly rounded result
338 if (t[j] != q[j] || t2[j] != q2[j])
339 {
340 double correct, correct2;
341 float err, err2;
342 float test = ((float *)q)[j];
343 float test2 = ((float *)q2)[j];
344
345 if (relaxedMode)
346 correct = f->rfunc.f_fpf(s[j], &correct2);
347 else
348 correct = f->func.f_fpf(s[j], &correct2);
349
350 // Per section 10 paragraph 6, accept any result if an input
351 // or output is a infinity or NaN or overflow
352 if (relaxedMode || skipNanInf)
353 {
354 if (skipNanInf && overflow[j]) continue;
355 // Note: no double rounding here. Reference functions
356 // calculate in single precision.
357 if (IsFloatInfinity(correct) || IsFloatNaN(correct)
358 || IsFloatInfinity(correct2) || IsFloatNaN(correct2)
359 || IsFloatInfinity(s[j]) || IsFloatNaN(s[j]))
360 continue;
361 }
362
363 typedef int (*CheckForSubnormal)(
364 double, float); // If we are in fast relaxed math, we
365 // have a different calculation for the
366 // subnormal threshold.
367 CheckForSubnormal isFloatResultSubnormalPtr;
368 if (relaxedMode)
369 {
370 err = Abs_Error(test, correct);
371 err2 = Abs_Error(test2, correct2);
372 isFloatResultSubnormalPtr =
373 &IsFloatResultSubnormalAbsError;
374 }
375 else
376 {
377 err = Ulp_Error(test, correct);
378 err2 = Ulp_Error(test2, correct2);
379 isFloatResultSubnormalPtr = &IsFloatResultSubnormal;
380 }
381 int fail = !(fabsf(err) <= float_ulps
382 && fabsf(err2) <= float_ulps);
383
384 if (ftz)
385 {
386 // retry per section 6.5.3.2
387 if ((*isFloatResultSubnormalPtr)(correct, float_ulps))
388 {
389 if ((*isFloatResultSubnormalPtr)(correct2,
390 float_ulps))
391 {
392 fail = fail && !(test == 0.0f && test2 == 0.0f);
393 if (!fail)
394 {
395 err = 0.0f;
396 err2 = 0.0f;
397 }
398 }
399 else
400 {
401 fail = fail
402 && !(test == 0.0f
403 && fabsf(err2) <= float_ulps);
404 if (!fail) err = 0.0f;
405 }
406 }
407 else if ((*isFloatResultSubnormalPtr)(correct2,
408 float_ulps))
409 {
410 fail = fail
411 && !(test2 == 0.0f && fabsf(err) <= float_ulps);
412 if (!fail) err2 = 0.0f;
413 }
414
415
416 // retry per section 6.5.3.3
417 if (IsFloatSubnormal(s[j]))
418 {
419 double correctp, correctn;
420 double correct2p, correct2n;
421 float errp, err2p, errn, err2n;
422
423 if (skipNanInf) feclearexcept(FE_OVERFLOW);
424 if (relaxedMode)
425 {
426 correctp = f->rfunc.f_fpf(0.0, &correct2p);
427 correctn = f->rfunc.f_fpf(-0.0, &correct2n);
428 }
429 else
430 {
431 correctp = f->func.f_fpf(0.0, &correct2p);
432 correctn = f->func.f_fpf(-0.0, &correct2n);
433 }
434
435 // Per section 10 paragraph 6, accept any result if
436 // an input or output is a infinity or NaN or
437 // overflow
438 if (skipNanInf)
439 {
440 if (fetestexcept(FE_OVERFLOW)) continue;
441
442 // Note: no double rounding here. Reference
443 // functions calculate in single precision.
444 if (IsFloatInfinity(correctp)
445 || IsFloatNaN(correctp)
446 || IsFloatInfinity(correctn)
447 || IsFloatNaN(correctn)
448 || IsFloatInfinity(correct2p)
449 || IsFloatNaN(correct2p)
450 || IsFloatInfinity(correct2n)
451 || IsFloatNaN(correct2n))
452 continue;
453 }
454
455 if (relaxedMode)
456 {
457 errp = Abs_Error(test, correctp);
458 err2p = Abs_Error(test, correct2p);
459 errn = Abs_Error(test, correctn);
460 err2n = Abs_Error(test, correct2n);
461 }
462 else
463 {
464 errp = Ulp_Error(test, correctp);
465 err2p = Ulp_Error(test, correct2p);
466 errn = Ulp_Error(test, correctn);
467 err2n = Ulp_Error(test, correct2n);
468 }
469
470 fail = fail
471 && ((!(fabsf(errp) <= float_ulps))
472 && (!(fabsf(err2p) <= float_ulps))
473 && ((!(fabsf(errn) <= float_ulps))
474 && (!(fabsf(err2n) <= float_ulps))));
475 if (fabsf(errp) < fabsf(err)) err = errp;
476 if (fabsf(errn) < fabsf(err)) err = errn;
477 if (fabsf(err2p) < fabsf(err2)) err2 = err2p;
478 if (fabsf(err2n) < fabsf(err2)) err2 = err2n;
479
480 // retry per section 6.5.3.4
481 if ((*isFloatResultSubnormalPtr)(correctp,
482 float_ulps)
483 || (*isFloatResultSubnormalPtr)(correctn,
484 float_ulps))
485 {
486 if ((*isFloatResultSubnormalPtr)(correct2p,
487 float_ulps)
488 || (*isFloatResultSubnormalPtr)(correct2n,
489 float_ulps))
490 {
491 fail = fail
492 && !(test == 0.0f && test2 == 0.0f);
493 if (!fail) err = err2 = 0.0f;
494 }
495 else
496 {
497 fail = fail
498 && !(test == 0.0f
499 && fabsf(err2) <= float_ulps);
500 if (!fail) err = 0.0f;
501 }
502 }
503 else if ((*isFloatResultSubnormalPtr)(correct2p,
504 float_ulps)
505 || (*isFloatResultSubnormalPtr)(
506 correct2n, float_ulps))
507 {
508 fail = fail
509 && !(test2 == 0.0f
510 && (fabsf(err) <= float_ulps));
511 if (!fail) err2 = 0.0f;
512 }
513 }
514 }
515 if (fabsf(err) > maxError0)
516 {
517 maxError0 = fabsf(err);
518 maxErrorVal0 = s[j];
519 }
520 if (fabsf(err2) > maxError1)
521 {
522 maxError1 = fabsf(err2);
523 maxErrorVal1 = s[j];
524 }
525 if (fail)
526 {
527 vlog_error("\nERROR: %s%s: {%f, %f} ulp error at %a: "
528 "*{%a, %a} vs. {%a, %a}\n",
529 f->name, sizeNames[k], err, err2,
530 ((float *)gIn)[j], ((float *)gOut_Ref)[j],
531 ((float *)gOut_Ref2)[j], test, test2);
532 error = -1;
533 goto exit;
534 }
535 }
536 }
537 }
538
539 if (isFract && gIsInRTZMode) (void)set_round(oldRoundMode, kfloat);
540
541 if (0 == (i & 0x0fffffff))
542 {
543 if (gVerboseBruteForce)
544 {
545 vlog("base:%14u step:%10zu bufferSize:%10zd \n", i, step,
546 BUFFER_SIZE);
547 }
548 else
549 {
550 vlog(".");
551 }
552 fflush(stdout);
553 }
554 }
555
556 if (!gSkipCorrectnessTesting)
557 {
558 if (gWimpyMode)
559 vlog("Wimp pass");
560 else
561 vlog("passed");
562
563 vlog("\t{%8.2f, %8.2f} @ {%a, %a}", maxError0, maxError1, maxErrorVal0,
564 maxErrorVal1);
565 }
566
567 vlog("\n");
568
569 exit:
570 // Release
571 for (auto k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
572 {
573 clReleaseKernel(kernels[k]);
574 clReleaseProgram(programs[k]);
575 }
576
577 return error;
578 }
579