//
// Copyright (c) 2017 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "function_list.h"
#include "test_functions.h"
#include "utility.h"

#include <cstring>

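// Builds the math_kernel<N> test kernel for one vector size. The generated
// OpenCL C source evaluates out[i] = <name>( in[i], out2 + i ), i.e. a
// double-precision builtin with one input and two results, the second
// returned through a pointer (the fract/modf/sincos style of function).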
static int BuildKernel(const char *name, int vectorSize, cl_kernel *k,
                       cl_program *p, bool relaxedMode)
{
    const char *c[] = { "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n",
                        "__kernel void math_kernel",
                        sizeNames[vectorSize],
                        "( __global double",
                        sizeNames[vectorSize],
                        "* out, __global double",
                        sizeNames[vectorSize],
                        "* out2, __global double",
                        sizeNames[vectorSize],
                        "* in )\n"
                        "{\n"
                        "   size_t i = get_global_id(0);\n"
                        "   out[i] = ",
                        name,
                        "( in[i], out2 + i );\n"
                        "}\n" };

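    // Size-3 vectors get a separate kernel: double3 data has to be moved with
    // vload3/vstore3, and the last work-item must special-case the 0-2
    // elements left over when the buffer is not an exact multiple of 3
    // doubles.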
    const char *c3[] = {
        "#pragma OPENCL EXTENSION cl_khr_fp64 : enable\n",
        "__kernel void math_kernel",
        sizeNames[vectorSize],
        "( __global double* out, __global double* out2, __global double* in)\n"
        "{\n"
        "   size_t i = get_global_id(0);\n"
        "   if( i + 1 < get_global_size(0) )\n"
        "   {\n"
        "       double3 f0 = vload3( 0, in + 3 * i );\n"
        "       double3 iout = NAN;\n"
        "       f0 = ",
        name,
        "( f0, &iout );\n"
        "       vstore3( f0, 0, out + 3*i );\n"
        "       vstore3( iout, 0, out2 + 3*i );\n"
        "   }\n"
        "   else\n"
        "   {\n"
        "       size_t parity = i & 1;   // Figure out how many elements are "
        "left over after BUFFER_SIZE % (3*sizeof(double)). Assume power of "
        "two buffer size \n"
        "       double3 iout = NAN;\n"
        "       double3 f0;\n"
        "       switch( parity )\n"
        "       {\n"
        "           case 1:\n"
        "               f0 = (double3)( in[3*i], NAN, NAN ); \n"
        "               break;\n"
        "           case 0:\n"
        "               f0 = (double3)( in[3*i], in[3*i+1], NAN ); \n"
        "               break;\n"
        "       }\n"
        "       f0 = ",
        name,
        "( f0, &iout );\n"
        "       switch( parity )\n"
        "       {\n"
        "           case 0:\n"
        "               out[3*i+1] = f0.y; \n"
        "               out2[3*i+1] = iout.y; \n"
        "               // fall through\n"
        "           case 1:\n"
        "               out[3*i] = f0.x; \n"
        "               out2[3*i] = iout.x; \n"
        "               break;\n"
        "       }\n"
        "   }\n"
        "}\n"
    };

    const char **kern = c;
    size_t kernSize = sizeof(c) / sizeof(c[0]);

    if (sizeValues[vectorSize] == 3)
    {
        kern = c3;
        kernSize = sizeof(c3) / sizeof(c3[0]);
    }

    char testName[32];
    snprintf(testName, sizeof(testName) - 1, "math_kernel%s",
             sizeNames[vectorSize]);

    return MakeKernel(kern, (cl_uint)kernSize, testName, k, p, relaxedMode);
}

typedef struct BuildKernelInfo
{
    cl_uint offset; // the first vector size to build
    cl_kernel *kernels;
    cl_program *programs;
    const char *nameInCode;
    bool relaxedMode; // Whether to build with -cl-fast-relaxed-math.
} BuildKernelInfo;

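// ThreadPool callback: each job builds the program and kernel for one vector
// size, starting at info->offset, so all vector widths compile in parallel.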
static cl_int BuildKernelFn(cl_uint job_id, cl_uint thread_id UNUSED, void *p)
{
    BuildKernelInfo *info = (BuildKernelInfo *)p;
    cl_uint i = info->offset + job_id;
    return BuildKernel(info->nameInCode, i, info->kernels + i,
                       info->programs + i, info->relaxedMode);
}

int TestFunc_Double2_Double(const Func *f, MTdata d, bool relaxedMode)
{
    int error;
    cl_program programs[VECTOR_SIZE_COUNT];
    cl_kernel kernels[VECTOR_SIZE_COUNT];
    float maxError0 = 0.0f;
    float maxError1 = 0.0f;
    int ftz = f->ftz || gForceFTZ;
    double maxErrorVal0 = 0.0f;
    double maxErrorVal1 = 0.0f;
    uint64_t step = getTestStep(sizeof(cl_double), BUFFER_SIZE);
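    // In wimpy mode only a subset of the 2^32 input patterns is tested; the
    // inputs are strided by 'scale' so the reduced sample set still spans the
    // whole range.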
    int scale =
        (int)((1ULL << 32) / (16 * BUFFER_SIZE / sizeof(cl_double)) + 1);

    logFunctionInfo(f->name, sizeof(cl_double), relaxedMode);

    Force64BitFPUPrecision();

    // Init the kernels
    {
        BuildKernelInfo build_info = { gMinVectorSizeIndex, kernels, programs,
                                       f->nameInCode, relaxedMode };
        if ((error = ThreadPool_Do(BuildKernelFn,
                                   gMaxVectorSizeIndex - gMinVectorSizeIndex,
                                   &build_info)))
            return error;
    }

    for (uint64_t i = 0; i < (1ULL << 32); i += step)
    {
        // Init input array
        double *p = (double *)gIn;
        if (gWimpyMode)
        {
            for (size_t j = 0; j < BUFFER_SIZE / sizeof(cl_double); j++)
                p[j] = DoubleFromUInt32((uint32_t)i + j * scale);
        }
        else
        {
            for (size_t j = 0; j < BUFFER_SIZE / sizeof(cl_double); j++)
                p[j] = DoubleFromUInt32((uint32_t)i + j);
        }
        if ((error = clEnqueueWriteBuffer(gQueue, gInBuffer, CL_FALSE, 0,
                                          BUFFER_SIZE, gIn, 0, NULL, NULL)))
        {
            vlog_error("\n*** Error %d in clEnqueueWriteBuffer ***\n", error);
            return error;
        }

        // write garbage into output arrays
        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            uint32_t pattern = 0xffffdead;
            memset_pattern4(gOut[j], &pattern, BUFFER_SIZE);
            if ((error =
                     clEnqueueWriteBuffer(gQueue, gOutBuffer[j], CL_FALSE, 0,
                                          BUFFER_SIZE, gOut[j], 0, NULL, NULL)))
            {
                vlog_error("\n*** Error %d in clEnqueueWriteBuffer2(%d) ***\n",
                           error, j);
                goto exit;
            }

            memset_pattern4(gOut2[j], &pattern, BUFFER_SIZE);
            if ((error = clEnqueueWriteBuffer(gQueue, gOutBuffer2[j], CL_FALSE,
                                              0, BUFFER_SIZE, gOut2[j], 0, NULL,
                                              NULL)))
            {
                vlog_error("\n*** Error %d in clEnqueueWriteBuffer2b(%d) ***\n",
                           error, j);
                goto exit;
            }
        }

        // Run the kernels
        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
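            // Each work-item handles one vector-sized chunk, so the global
            // work size is the buffer size divided by the vector width in
            // bytes, rounded up.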
            size_t vectorSize = sizeValues[j] * sizeof(cl_double);
            size_t localCount = (BUFFER_SIZE + vectorSize - 1) / vectorSize;
            if ((error = clSetKernelArg(kernels[j], 0, sizeof(gOutBuffer[j]),
                                        &gOutBuffer[j])))
            {
                LogBuildError(programs[j]);
                goto exit;
            }
            if ((error = clSetKernelArg(kernels[j], 1, sizeof(gOutBuffer2[j]),
                                        &gOutBuffer2[j])))
            {
                LogBuildError(programs[j]);
                goto exit;
            }
            if ((error = clSetKernelArg(kernels[j], 2, sizeof(gInBuffer),
                                        &gInBuffer)))
            {
                LogBuildError(programs[j]);
                goto exit;
            }

            if ((error =
                     clEnqueueNDRangeKernel(gQueue, kernels[j], 1, NULL,
                                            &localCount, NULL, 0, NULL, NULL)))
            {
                vlog_error("FAILED -- could not execute kernel\n");
                goto exit;
            }
        }

        // Get that moving
        if ((error = clFlush(gQueue))) vlog("clFlush failed\n");

        // Calculate the correctly rounded reference result
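        // The reference function returns the first result and writes the
        // second through its pointer argument; both are computed in long
        // double precision.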
        double *r = (double *)gOut_Ref;
        double *r2 = (double *)gOut_Ref2;
        double *s = (double *)gIn;
        for (size_t j = 0; j < BUFFER_SIZE / sizeof(cl_double); j++)
        {
            long double dd;
            r[j] = (double)f->dfunc.f_fpf(s[j], &dd);
            r2[j] = (double)dd;
        }

        // Read the data back
        for (auto j = gMinVectorSizeIndex; j < gMaxVectorSizeIndex; j++)
        {
            if ((error =
                     clEnqueueReadBuffer(gQueue, gOutBuffer[j], CL_TRUE, 0,
                                         BUFFER_SIZE, gOut[j], 0, NULL, NULL)))
            {
                vlog_error("ReadArray failed %d\n", error);
                goto exit;
            }
            if ((error =
                     clEnqueueReadBuffer(gQueue, gOutBuffer2[j], CL_TRUE, 0,
                                         BUFFER_SIZE, gOut2[j], 0, NULL, NULL)))
            {
                vlog_error("ReadArray2 failed %d\n", error);
                goto exit;
            }
        }

        if (gSkipCorrectnessTesting) break;

        // Verify data
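        // Compare device and reference results bit-for-bit first; the ULP
        // error is only computed (from a freshly evaluated long double
        // reference) when the bits differ.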
        uint64_t *t = (uint64_t *)gOut_Ref;
        uint64_t *t2 = (uint64_t *)gOut_Ref2;
        for (size_t j = 0; j < BUFFER_SIZE / sizeof(double); j++)
        {
            for (auto k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
            {
                uint64_t *q = (uint64_t *)(gOut[k]);
                uint64_t *q2 = (uint64_t *)(gOut2[k]);

                // If we aren't getting the correctly rounded result
                if (t[j] != q[j] || t2[j] != q2[j])
                {
                    double test = ((double *)q)[j];
                    double test2 = ((double *)q2)[j];
                    long double correct2;
                    long double correct = f->dfunc.f_fpf(s[j], &correct2);
                    float err = Bruteforce_Ulp_Error_Double(test, correct);
                    float err2 = Bruteforce_Ulp_Error_Double(test2, correct2);
                    int fail = !(fabsf(err) <= f->double_ulps
                                 && fabsf(err2) <= f->double_ulps);
                    if (ftz)
                    {
                        // retry per section 6.5.3.2
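                        // On a flush-to-zero device a zero result is also
                        // acceptable whenever the correctly rounded result is
                        // subnormal, so re-test against 0.0 before failing.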
                        if (IsDoubleResultSubnormal(correct, f->double_ulps))
                        {
                            if (IsDoubleResultSubnormal(correct2,
                                                        f->double_ulps))
                            {
                                fail = fail && !(test == 0.0f && test2 == 0.0f);
                                if (!fail)
                                {
                                    err = 0.0f;
                                    err2 = 0.0f;
                                }
                            }
                            else
                            {
                                fail = fail
                                    && !(test == 0.0f
                                         && fabsf(err2) <= f->double_ulps);
                                if (!fail) err = 0.0f;
                            }
                        }
                        else if (IsDoubleResultSubnormal(correct2,
                                                         f->double_ulps))
                        {
                            fail = fail
                                && !(test2 == 0.0f
                                     && fabsf(err) <= f->double_ulps);
                            if (!fail) err2 = 0.0f;
                        }

                        // retry per section 6.5.3.3
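                        // A subnormal input may itself have been flushed to
                        // zero before the function was evaluated, so results
                        // computed from +0.0 and -0.0 inputs are accepted too.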
                        if (IsDoubleSubnormal(s[j]))
                        {
                            long double correct2p, correct2n;
                            long double correctp =
                                f->dfunc.f_fpf(0.0, &correct2p);
                            long double correctn =
                                f->dfunc.f_fpf(-0.0, &correct2n);
                            float errp =
                                Bruteforce_Ulp_Error_Double(test, correctp);
                            float err2p =
                                Bruteforce_Ulp_Error_Double(test2, correct2p);
                            float errn =
                                Bruteforce_Ulp_Error_Double(test, correctn);
                            float err2n =
                                Bruteforce_Ulp_Error_Double(test2, correct2n);
                            fail = fail
                                && ((!(fabsf(errp) <= f->double_ulps))
                                    && (!(fabsf(err2p) <= f->double_ulps))
                                    && ((!(fabsf(errn) <= f->double_ulps))
                                        && (!(fabsf(err2n)
                                              <= f->double_ulps))));
                            if (fabsf(errp) < fabsf(err)) err = errp;
                            if (fabsf(errn) < fabsf(err)) err = errn;
                            if (fabsf(err2p) < fabsf(err2)) err2 = err2p;
                            if (fabsf(err2n) < fabsf(err2)) err2 = err2n;

                            // retry per section 6.5.3.4
                            if (IsDoubleResultSubnormal(correctp,
                                                        f->double_ulps)
                                || IsDoubleResultSubnormal(correctn,
                                                           f->double_ulps))
                            {
                                if (IsDoubleResultSubnormal(correct2p,
                                                            f->double_ulps)
                                    || IsDoubleResultSubnormal(correct2n,
                                                               f->double_ulps))
                                {
                                    fail = fail
                                        && !(test == 0.0f && test2 == 0.0f);
                                    if (!fail) err = err2 = 0.0f;
                                }
                                else
                                {
                                    fail = fail
                                        && !(test == 0.0f
                                             && fabsf(err2) <= f->double_ulps);
                                    if (!fail) err = 0.0f;
                                }
                            }
                            else if (IsDoubleResultSubnormal(correct2p,
                                                             f->double_ulps)
                                     || IsDoubleResultSubnormal(correct2n,
                                                                f->double_ulps))
                            {
                                fail = fail
                                    && !(test2 == 0.0f
                                         && (fabsf(err) <= f->double_ulps));
                                if (!fail) err2 = 0.0f;
                            }
                        }
                    }
                    if (fabsf(err) > maxError0)
                    {
                        maxError0 = fabsf(err);
                        maxErrorVal0 = s[j];
                    }
                    if (fabsf(err2) > maxError1)
                    {
                        maxError1 = fabsf(err2);
                        maxErrorVal1 = s[j];
                    }
                    if (fail)
                    {
                        vlog_error(
                            "\nERROR: %sD%s: {%f, %f} ulp error at %.13la: "
                            "*{%.13la, %.13la} vs. {%.13la, %.13la}\n",
                            f->name, sizeNames[k], err, err2,
                            ((double *)gIn)[j], ((double *)gOut_Ref)[j],
                            ((double *)gOut_Ref2)[j], test, test2);
                        error = -1;
                        goto exit;
                    }
                }
            }
        }

        if (0 == (i & 0x0fffffff))
        {
            if (gVerboseBruteForce)
            {
                vlog("base:%14llu step:%10llu bufferSize:%10zu \n",
                     (unsigned long long)i, (unsigned long long)step,
                     (size_t)BUFFER_SIZE);
            }
            else
            {
                vlog(".");
            }
            fflush(stdout);
        }
    }

    if (!gSkipCorrectnessTesting)
    {
        if (gWimpyMode)
            vlog("Wimp pass");
        else
            vlog("passed");

        vlog("\t{%8.2f, %8.2f} @ {%a, %a}", maxError0, maxError1, maxErrorVal0,
             maxErrorVal1);
    }

    vlog("\n");

exit:
    // Release
    for (auto k = gMinVectorSizeIndex; k < gMaxVectorSizeIndex; k++)
    {
        clReleaseKernel(kernels[k]);
        clReleaseProgram(programs[k]);
    }

    return error;
}