/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rsdAllocation.h"
#include "rsdCore.h"

#include <android/native_window.h>

#ifdef RS_COMPATIBILITY_LIB
#include "rsCompatibilityLib.h"
#else
#include "rsdFrameBufferObj.h"
#include <vndk/window.h>

#include <GLES/gl.h>
#include <GLES2/gl2.h>
#include <GLES/glext.h>
#endif

using android::renderscript::Allocation;
using android::renderscript::Context;
using android::renderscript::Element;
using android::renderscript::Type;
using android::renderscript::rs_allocation;
using android::renderscript::rsBoxFilter565;
using android::renderscript::rsBoxFilter8888;
using android::renderscript::rsMax;
using android::renderscript::rsRound;

#ifndef RS_COMPATIBILITY_LIB
static const GLenum gFaceOrder[] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

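// Map a RenderScript data type / data kind to the corresponding GL enum.
// Both helpers return 0 when there is no GL equivalent.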
GLenum rsdTypeToGLType(RsDataType t) {
    switch (t) {
    case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
    case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
    case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;

    //case RS_TYPE_FLOAT_16:        return GL_HALF_FLOAT;
    case RS_TYPE_FLOAT_32:          return GL_FLOAT;
    case RS_TYPE_UNSIGNED_8:        return GL_UNSIGNED_BYTE;
    case RS_TYPE_UNSIGNED_16:       return GL_UNSIGNED_SHORT;
    case RS_TYPE_SIGNED_8:          return GL_BYTE;
    case RS_TYPE_SIGNED_16:         return GL_SHORT;
    default:                        break;
    }
    return 0;
}

GLenum rsdKindToGLFormat(RsDataKind k) {
    switch (k) {
    case RS_KIND_PIXEL_L:     return GL_LUMINANCE;
    case RS_KIND_PIXEL_A:     return GL_ALPHA;
    case RS_KIND_PIXEL_LA:    return GL_LUMINANCE_ALPHA;
    case RS_KIND_PIXEL_RGB:   return GL_RGB;
    case RS_KIND_PIXEL_RGBA:  return GL_RGBA;
    case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
    default:                  break;
    }
    return 0;
}
#endif

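// Compute the address of element (xoff, yoff, zoff) at the given LOD and
// cubemap face within an Allocation's backing store.
// Illustrative example (values assumed, not from the original source): for a
// 2D RGBA_8888 allocation (4 bytes/element) with lod[0].stride == 256, the
// element at (x=4, y=2) lives at mallocPtr + 2*256 + 4*4.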
uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
                      uint32_t xoff, uint32_t yoff, uint32_t zoff,
                      uint32_t lod, RsAllocationCubemapFace face) {
    uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
    ptr += face * alloc->mHal.drvState.faceOffset;
    ptr += zoff * alloc->mHal.drvState.lod[lod].dimY * alloc->mHal.drvState.lod[lod].stride;
    ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
    ptr += xoff * alloc->mHal.state.elementSizeBytes;
    return ptr;
}


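// Push a sub-rectangle of host memory into the GL texture backing this
// Allocation (a no-op in vendor/compatibility builds). Cubemap allocations
// select the target face via gFaceOrder.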
static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
                            uint32_t xoff, uint32_t yoff, uint32_t lod,
                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(drv->textureID);
    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    GLenum t = GL_TEXTURE_2D;
    if (alloc->mHal.state.hasFaces) {
        t = gFaceOrder[face];
    }
    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
#endif
}


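// Upload every face and mip level of the Allocation. The first upload must
// allocate texture storage with glTexImage2D; later syncs can use the cheaper
// glTexSubImage2D since the storage already exists.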
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);

    uint32_t faceCount = 1;
    if (alloc->mHal.state.hasFaces) {
        faceCount = 6;
    }

    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    for (uint32_t face = 0; face < faceCount; face++) {
        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, 0, lod, (RsAllocationCubemapFace)face);

            GLenum t = GL_TEXTURE_2D;
            if (alloc->mHal.state.hasFaces) {
                t = gFaceOrder[face];
            }

            if (isFirstUpload) {
                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
                            alloc->mHal.state.type->getLODDimX(lod),
                            alloc->mHal.state.type->getLODDimY(lod),
                            0, drv->glFormat, drv->glType, p);
            } else {
                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
                            alloc->mHal.state.type->getLODDimX(lod),
                            alloc->mHal.state.type->getLODDimY(lod),
                            drv->glFormat, drv->glType, p);
            }
        }
    }

    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    }
    rsdGLCheckError(rsc, "Upload2DTexture");
}
#endif

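// Sync script-side memory to the GL texture. IO_INPUT allocations only need
// a texture name here (their pixels arrive through the attached surface);
// otherwise the host copy is uploaded and, if the Allocation is texture-only
// (no USAGE_SCRIPT), the host copy is freed afterwards.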
static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        if (!drv->textureID) {
            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        }
        return;
    }

    if (!drv->glType || !drv->glFormat) {
        return;
    }

    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }

    bool isFirstUpload = false;

    if (!drv->textureID) {
        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        isFirstUpload = true;
    }

    Upload2DTexture(rsc, alloc, isFirstUpload);

    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
            alloc->mHal.drvState.lod[0].mallocPtr = nullptr;
        }
    }
    rsdGLCheckError(rsc, "UploadToTexture");
#endif
}

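// Lazily create a GL renderbuffer sized to match LOD 0. This only runs once
// per Allocation; a failed glGenRenderbuffers is logged rather than fatal.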
static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (!drv->glFormat) {
        return;
    }

    if (!drv->renderTargetID) {
        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);

        if (!drv->renderTargetID) {
            // This should generally not happen
            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
            rsc->dumpDebug();
            return;
        }
        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    }
    rsdGLCheckError(rsc, "AllocateRenderTarget");
#endif
}

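// Copy a 1D Allocation into a GL buffer object (vertex data). The Type must
// be one-dimensional; failure defers the upload instead of aborting.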
static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(!alloc->mHal.state.type->getDimY());
    rsAssert(!alloc->mHal.state.type->getDimZ());

    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;

    if (!drv->bufferID) {
        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    }
    if (!drv->bufferID) {
        ALOGE("Upload to buffer object failed");
        drv->uploadDeferred = true;
        return;
    }
    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    RSD_CALL_GL(glBufferData, drv->glTarget,
                alloc->mHal.state.type->getPackedSizeBytes(),
                alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    rsdGLCheckError(rsc, "UploadToBufferObject");
#endif
}


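// Derive the chroma-plane layout for a YUV Allocation, stashing the plane
// pointers in lod[1]/lod[2] (YUV is 2D-only, so the mip slots are free).
// Illustrative layout for a 640x480 YV12 buffer with a 640-byte Y stride
// (sizes assumed for the example, not taken from the original source):
//   Y plane:  640x480 at lod[0]
//   V plane:  320x240 at lod[2], stride rounded up to 16 -> 320
//   U plane:  320x240 at lod[1], immediately after the V plane
// NV21 instead keeps interleaved VU rows at the full Y stride, with
// yuv.step == 2 to skip the interleaved samples.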
static size_t DeriveYUVLayout(int yuv, Allocation::Hal::DrvState *state) {
#ifndef RS_COMPATIBILITY_LIB
    // For the flexible YCbCr format, layout is initialized during the call to
    // Allocation::ioReceive. Return early and avoid clobbering any
    // pre-existing layout.
    if (yuv == RS_YUV_420_888) {
        return 0;
    }
#endif

    // YUV only supports basic 2D,
    // so we can stash the plane pointers in the mipmap levels.
    size_t uvSize = 0;
    state->lod[1].dimX = state->lod[0].dimX / 2;
    state->lod[1].dimY = state->lod[0].dimY / 2;
    state->lod[2].dimX = state->lod[0].dimX / 2;
    state->lod[2].dimY = state->lod[0].dimY / 2;
    state->yuv.shift = 1;
    state->yuv.step = 1;
    state->lodCount = 3;

    switch (yuv) {
    case RS_YUV_YV12:
        state->lod[2].stride = rsRound(state->lod[0].stride >> 1, 16);
        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                                  (state->lod[0].stride * state->lod[0].dimY);
        uvSize += state->lod[2].stride * state->lod[2].dimY;

        state->lod[1].stride = state->lod[2].stride;
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) +
                                  (state->lod[2].stride * state->lod[2].dimY);
        uvSize += state->lod[1].stride * state->lod[1].dimY;
        break;
    case RS_YUV_NV21:
        //state->lod[1].dimX = state->lod[0].dimX;
        state->lod[1].stride = state->lod[0].stride;
        state->lod[2].stride = state->lod[0].stride;
        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                                  (state->lod[0].stride * state->lod[0].dimY);
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) + 1;
        uvSize += state->lod[1].stride * state->lod[1].dimY;
        state->yuv.step = 2;
        break;
    default:
        rsAssert(0);
    }
    return uvSize;
}

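// Fill in the per-LOD dimension/stride/pointer table for an Allocation and
// return the total size in bytes (x6 for cubemaps). With ptr == nullptr this
// is a dry run that only computes the required size.
// Illustrative example (assumed values): a 16x16 RGBA_8888 type with full
// mips and 16-byte row alignment lays out LODs of 16x16, 8x8, 4x4, 2x2 and
// 1x1, each starting at the running offset 'o'.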
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
                                          const Type *type, uint8_t *ptr, size_t requiredAlignment) {
    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    alloc->mHal.drvState.lod[0].mallocPtr = 0;
    // Stride needs to be aligned to a boundary defined by requiredAlignment!
    size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    alloc->mHal.drvState.lod[0].stride = rsRound(stride, requiredAlignment);
    alloc->mHal.drvState.lodCount = type->getLODCount();
    alloc->mHal.drvState.faceCount = type->getDimFaces();

    size_t offsets[Allocation::MAX_LOD];
    memset(offsets, 0, sizeof(offsets));

    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
               rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    if (alloc->mHal.state.yuv) {
        o += DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);

        for (uint32_t ct = 1; ct < alloc->mHal.drvState.lodCount; ct++) {
            offsets[ct] = (size_t)alloc->mHal.drvState.lod[ct].mallocPtr;
        }
    } else if (alloc->mHal.drvState.lodCount > 1) {
        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
        for (uint32_t lod = 1; lod < alloc->mHal.drvState.lodCount; lod++) {
            alloc->mHal.drvState.lod[lod].dimX = tx;
            alloc->mHal.drvState.lod[lod].dimY = ty;
            alloc->mHal.drvState.lod[lod].dimZ = tz;
            alloc->mHal.drvState.lod[lod].stride =
                    rsRound(tx * type->getElementSizeBytes(), requiredAlignment);
            offsets[lod] = o;
            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
            if (tx > 1) tx >>= 1;
            if (ty > 1) ty >>= 1;
            if (tz > 1) tz >>= 1;
        }
    }

    alloc->mHal.drvState.faceOffset = o;

    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    for (uint32_t lod = 1; lod < alloc->mHal.drvState.lodCount; lod++) {
        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    }

    size_t allocSize = alloc->mHal.drvState.faceOffset;
    if (alloc->mHal.drvState.faceCount) {
        allocSize *= 6;
    }

    return allocSize;
}

static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
                                          const Type *type, uint8_t *ptr) {
    return AllocationBuildPointerTable(rsc, alloc, type, ptr, Allocation::kMinimumRSAlignment);
}

static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero, size_t requiredAlignment) {
    // We align all allocations to a boundary defined by requiredAlignment.
    uint8_t* ptr = (uint8_t *)memalign(requiredAlignment, allocSize);
    if (!ptr) {
        return nullptr;
    }
    if (forceZero) {
        memset(ptr, 0, allocSize);
    }
    return ptr;
}

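// Create the driver-side state for an Allocation. Backing memory comes from
// one of four places: nothing yet (IO_OUTPUT), the attached surface
// (IO_INPUT), a user-provided pointer (USAGE_SHARED, when its rows meet the
// alignment requirement), or a fresh memalign'd buffer.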
bool rsdAllocationInitStrided(const Context *rsc, Allocation *alloc, bool forceZero, size_t requiredAlignment) {
    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
    if (!drv) {
        return false;
    }
    alloc->mHal.drv = drv;

    // requiredAlignment must be a power of 2 and at least kMinimumRSAlignment.
    if ((requiredAlignment & (requiredAlignment - 1)) != 0 ||
        requiredAlignment < Allocation::kMinimumRSAlignment) {
        ALOGE("requiredAlignment must be a power of 2 no smaller than kMinimumRSAlignment");
        return false;
    }
    // Calculate the object size.
    size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), nullptr, requiredAlignment);

    uint8_t *ptr = nullptr;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {

    } else if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        // Allocation is allocated when the surface is created
        // in getSurface
#ifdef RS_COMPATIBILITY_LIB
    } else if (alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_INCREMENTAL_SUPPORT | RS_ALLOCATION_USAGE_SHARED)) {
        if (alloc->mHal.state.userProvidedPtr == nullptr) {
            ALOGE("User-backed buffer pointer cannot be null");
            return false;
        }
        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
            ALOGE("User-allocated buffers must not have multiple faces or LODs");
            return false;
        }

        drv->useUserProvidedPtr = true;
        ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
#endif
    } else if (alloc->mHal.state.userProvidedPtr != nullptr) {
        // user-provided allocation
        // limitations: no faces, no LOD, USAGE_SCRIPT or SCRIPT+TEXTURE only
        if (!(alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED) ||
              alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED | RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE))) {
            ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT | USAGE_SHARED or "
                  "USAGE_SCRIPT | USAGE_SHARED | USAGE_GRAPHICS_TEXTURE");
            return false;
        }
        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
            ALOGE("User-allocated buffers must not have multiple faces or LODs");
            return false;
        }

        // Rows must be aligned based on requiredAlignment.
        // Validate that here; otherwise fall back to a separate allocation
        // instead of using the user-backed one.
        if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % requiredAlignment) != 0) {
            ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
            drv->useUserProvidedPtr = false;

            ptr = allocAlignedMemory(allocSize, forceZero, requiredAlignment);
            if (!ptr) {
                alloc->mHal.drv = nullptr;
                free(drv);
                return false;
            }

        } else {
            drv->useUserProvidedPtr = true;
            ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
        }
    } else {
        ptr = allocAlignedMemory(allocSize, forceZero, requiredAlignment);
        if (!ptr) {
            alloc->mHal.drv = nullptr;
            free(drv);
            return false;
        }
    }
    // Build the pointer tables
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr, requiredAlignment);
    if (allocSize != verifySize) {
        rsAssert(!"Size mismatch");
    }

    drv->glTarget = GL_NONE;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        if (alloc->mHal.state.hasFaces) {
            drv->glTarget = GL_TEXTURE_CUBE_MAP;
        } else {
            drv->glTarget = GL_TEXTURE_2D;
        }
    } else {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
            drv->glTarget = GL_ARRAY_BUFFER;
        }
    }

#ifndef RS_COMPATIBILITY_LIB
    drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
    drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
#else
    drv->glType = 0;
    drv->glFormat = 0;
#endif

    if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
        drv->uploadDeferred = true;
    }

#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    drv->readBackFBO = nullptr;
#endif

    // Fill out the initial state of the buffer if we couldn't use the
    // user-provided ptr but USAGE_SHARED was requested.
    if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
        rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X,
                            alloc->getType()->getDimX(), alloc->getType()->getDimY(),
                            alloc->mHal.state.userProvidedPtr, allocSize, 0);
    }


#ifdef RS_FIND_OFFSETS
    ALOGE("pointer for allocation: %p", alloc);
    ALOGE("pointer for allocation.drv: %p", &alloc->mHal.drv);
#endif


    return true;
}

bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
    return rsdAllocationInitStrided(rsc, alloc, forceZero, Allocation::kMinimumRSAlignment);
}

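// Adapters are lightweight views into a parent Allocation: rebuild this
// adapter's per-LOD table so it points at the selected origin/LOD/face of
// the base Allocation's storage.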
void rsdAllocationAdapterOffset(const Context *rsc, const Allocation *alloc) {
    //ALOGE("rsdAllocationAdapterOffset");

    // Get a base pointer to the new LOD
    const Allocation *base = alloc->mHal.state.baseAlloc;
    const Type *type = alloc->mHal.state.type;
    if (base == nullptr) {
        return;
    }

    //ALOGE("rsdAllocationAdapterOffset %p %p", ptrA, ptrB);
    //ALOGE("rsdAllocationAdapterOffset lodCount %i", alloc->mHal.drvState.lodCount);

    const int lodBias = alloc->mHal.state.originLOD;
    uint32_t lodCount = rsMax(alloc->mHal.drvState.lodCount, (uint32_t)1);
    for (uint32_t lod = 0; lod < lodCount; lod++) {
        alloc->mHal.drvState.lod[lod] = base->mHal.drvState.lod[lod + lodBias];
        alloc->mHal.drvState.lod[lod].mallocPtr = GetOffsetPtr(alloc,
                alloc->mHal.state.originX, alloc->mHal.state.originY, alloc->mHal.state.originZ,
                lodBias, (RsAllocationCubemapFace)alloc->mHal.state.originFace);
    }
}

bool rsdAllocationAdapterInit(const Context *rsc, Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
    if (!drv) {
        return false;
    }
    alloc->mHal.drv = drv;

    // We need to build an allocation that looks like a subset of the parent allocation
    rsdAllocationAdapterOffset(rsc, alloc);

    return true;
}

void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.baseAlloc == nullptr) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
        if (drv->bufferID) {
            // Causes a SW crash....
            //ALOGV(" mBufferID %i", mBufferID);
            //glDeleteBuffers(1, &mBufferID);
            //mBufferID = 0;
        }
        if (drv->textureID) {
            RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
            drv->textureID = 0;
        }
        if (drv->renderTargetID) {
            RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
            drv->renderTargetID = 0;
        }
#endif

        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            // Don't free user-allocated ptrs or IO_INPUT/IO_OUTPUT buffers.
            if (!(drv->useUserProvidedPtr) &&
                !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) &&
                !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
                free(alloc->mHal.drvState.lod[0].mallocPtr);
            }
            alloc->mHal.drvState.lod[0].mallocPtr = nullptr;
        }

#ifndef RS_COMPATIBILITY_LIB
#ifndef RS_VENDOR_LIB
        if (drv->readBackFBO != nullptr) {
            delete drv->readBackFBO;
            drv->readBackFBO = nullptr;
        }
#endif
        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
            (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
            ANativeWindow *nw = drv->wndSurface;
            if (nw) {
                // If we have an attached surface, we need to release it.
                AHardwareBuffer* ahwb = ANativeWindowBuffer_getHardwareBuffer(drv->wndBuffer);
                int fenceID = -1;
                AHardwareBuffer_unlock(ahwb, &fenceID);
                ANativeWindow_cancelBuffer(nw, drv->wndBuffer, fenceID);
                ANativeWindow_release(nw);
                drv->wndSurface = nullptr;
                drv->wndBuffer = nullptr;
            }
        }
#endif
    }

    free(drv);
    alloc->mHal.drv = nullptr;
}

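// Resize a 1D Allocation in place via realloc, zeroing any newly exposed
// elements. USAGE_SHARED (user-backed) Allocations cannot be resized.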
void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
                         const Type *newType, bool zeroNew) {
    const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
    const uint32_t dimX = newType->getDimX();

    // Can't resize Allocations with user-allocated buffers.
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
        ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
        return;
    }
    void *oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
    // Calculate the object size
    size_t s = AllocationBuildPointerTable(rsc, alloc, newType, nullptr);
    uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
    // Build the relative pointer tables.
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
    if (s != verifySize) {
        rsAssert(!"Size mismatch");
    }


    if (dimX > oldDimX) {
        size_t stride = alloc->mHal.state.elementSizeBytes;
        memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
               0, stride * (dimX - oldDimX));
    }
}

static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    if (!alloc->getIsScript()) {
        return; // nothing to sync
    }

    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;

    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!drv->textureID && !drv->renderTargetID) {
        return; // nothing was rendered here yet, so nothing to sync
    }
    if (drv->readBackFBO == nullptr) {
        drv->readBackFBO = new RsdFrameBufferObj();
        drv->readBackFBO->setColorTarget(drv, 0);
        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
                                        alloc->getType()->getDimY());
    }

    // Bind the framebuffer object so we can read back from it
    drv->readBackFBO->setActive(rsc);

    // Do the readback
    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
                alloc->mHal.drvState.lod[0].dimY,
                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);

    // Revert framebuffer to its original
    lastFbo->setActive(rsc);
#endif
}


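// Propagate the contents of an Allocation to every backing store implied by
// its usage flags: GL texture, renderbuffer, vertex buffer, or (for
// USAGE_SHARED) just a memory fence on the CPU side.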
void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
                          RsAllocationUsageType src) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        if (!alloc->getIsRenderTarget()) {
            rsc->setError(RS_ERROR_FATAL_DRIVER,
                          "Attempting to sync allocation from render target, "
                          "for non-render target allocation");
        } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
            rsc->setError(RS_ERROR_FATAL_DRIVER, "Can only sync from an RGBA "
                                                 "render target");
        } else {
            rsdAllocationSyncFromFBO(rsc, alloc);
        }
        return;
    }

    rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT || src == RS_ALLOCATION_USAGE_SHARED);

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        UploadToTexture(rsc, alloc);
    } else {
        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
            AllocateRenderTarget(rsc, alloc);
        }
    }
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
        UploadToBufferObject(rsc, alloc);
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {

        if (src == RS_ALLOCATION_USAGE_SHARED) {
            // Just a memory fence for the CPU driver;
            // vendor drivers probably want to flush any dirty cache lines for
            // this particular Allocation.
            __sync_synchronize();
        }
    }

    drv->uploadDeferred = false;
}

void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    drv->uploadDeferred = true;
}

#ifndef RS_COMPATIBILITY_LIB
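// Dequeue the next buffer from the window and lock it for CPU writes; LOD 0
// of the Allocation is then redirected at the locked pixels. The fence
// returned by dequeueBuffer is handed to AHardwareBuffer_lock, which waits
// on it before granting CPU access.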
static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    // Must lock the whole surface
    int fenceID = -1;
    int r = ANativeWindow_dequeueBuffer(nw, &drv->wndBuffer, &fenceID);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error dequeueing IO output buffer.");
        close(fenceID);
        return false;
    }

    void *dst = nullptr;
    AHardwareBuffer* ahwb = ANativeWindowBuffer_getHardwareBuffer(drv->wndBuffer);
    r = AHardwareBuffer_lock(ahwb, AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
                             fenceID, NULL, &dst);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error locking IO output buffer.");
        return false;
    }
    alloc->mHal.drvState.lod[0].mallocPtr = dst;
    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
    rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);

    return true;
}
#endif

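// Attach (or detach, when nw is null) the output surface backing an
// IO_OUTPUT Allocation. Any previously attached surface is unlocked, its
// buffer cancelled, and the window released before the new one is
// configured and its first buffer acquired.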
void rsdAllocationSetSurface(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    // Clean up the old surface if there is one.
    if (drv->wndSurface) {
        ANativeWindow *old = drv->wndSurface;
        AHardwareBuffer* ahwb = ANativeWindowBuffer_getHardwareBuffer(drv->wndBuffer);
        int fenceID = -1;
        int32_t r = AHardwareBuffer_unlock(ahwb, &fenceID);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error unlocking output buffer.");
            close(fenceID);
            return;
        }
        r = ANativeWindow_cancelBuffer(old, drv->wndBuffer, fenceID);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error canceling output buffer.");
            return;
        }
        ANativeWindow_release(old);
        drv->wndSurface = nullptr;
        drv->wndBuffer = nullptr;
    }

    if (nw) {
        int32_t r = ANativeWindow_setBuffersGeometry(nw, alloc->mHal.drvState.lod[0].dimX,
                                                     alloc->mHal.drvState.lod[0].dimY,
                                                     WINDOW_FORMAT_RGBA_8888);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer geometry.");
            return;
        }

        IoGetBuffer(rsc, alloc, nw);
        drv->wndSurface = nw;
    }

    return;
#endif
}

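// Queue the currently locked buffer to the consumer. For render-target
// usage this is just eglSwapBuffers; for script usage the buffer is
// unlocked, queued with its release fence, and a fresh buffer is dequeued
// so the script can keep writing.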
void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *nw = drv->wndSurface;
#ifndef RS_VENDOR_LIB
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
        return;
    }
#endif
    if (nw) {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            AHardwareBuffer* ahwb = ANativeWindowBuffer_getHardwareBuffer(drv->wndBuffer);
            int fenceID = -1;
            int32_t r = AHardwareBuffer_unlock(ahwb, &fenceID);
            if (r) {
                rsc->setError(RS_ERROR_DRIVER, "Error unlocking output buffer.");
                close(fenceID);
                return;
            }
            r = ANativeWindow_queueBuffer(nw, drv->wndBuffer, fenceID);
            if (r) {
                rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
                return;
            }
            drv->wndBuffer = nullptr;
            IoGetBuffer(rsc, alloc, nw);
        }
    } else {
        rsc->setError(RS_ERROR_DRIVER, "Sent IO buffer with no attached surface.");
        return;
    }
#endif
}

void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
    if (alloc->mHal.state.yuv) {
        DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);
    }
}


void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         const void *data, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    uint8_t *ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    size_t size = count * eSize;
    if (ptr != data) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        if (alloc->mHal.state.hasReferences) {
            alloc->incRefs(data, count);
            alloc->decRefs(ptr, count);
        }
        memcpy(ptr, data, size);
    }
    drv->uploadDeferred = true;
}

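// Copy a 2D rectangle of host data into the Allocation row by row, honoring
// the source stride. When the Allocation is YUV, the chroma planes that
// follow the Y data in the source are copied into the plane pointers set up
// by DeriveYUVLayout. Without a malloc'd backing store, the data goes
// straight to the GL texture instead.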
void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            drv->uploadDeferred = true;
            return;
        }

        for (uint32_t line = yoff; line < (yoff + h); line++) {
            if (alloc->mHal.state.hasReferences) {
                alloc->incRefs(src, w);
                alloc->decRefs(dst, w);
            }
            memcpy(dst, src, lineSize);
            src += stride;
            dst += alloc->mHal.drvState.lod[lod].stride;
        }
        if (alloc->mHal.state.yuv) {
            size_t clineSize = lineSize;
            int lod = 1;
            int maxLod = 2;
            if (alloc->mHal.state.yuv == RS_YUV_YV12) {
                maxLod = 3;
                clineSize >>= 1;
            } else if (alloc->mHal.state.yuv == RS_YUV_NV21) {
                lod = 2;
                maxLod = 3;
            }

            while (lod < maxLod) {
                uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);

                for (uint32_t line = (yoff >> 1); line < ((yoff + h) >> 1); line++) {
                    memcpy(dst, src, clineSize);
                    // When copying from an array to an Allocation, the src pointer
                    // to the array should just move by the number of bytes copied.
                    src += clineSize;
                    dst += alloc->mHal.drvState.lod[lod].stride;
                }
                lod++;
            }

        }
        drv->uploadDeferred = true;
    } else {
        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    }
}

void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod,
                         uint32_t w, uint32_t h, uint32_t d, const void *data,
                         size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        for (uint32_t z = zoff; z < (d + zoff); z++) {
            uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, z, lod,
                                        RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            if (dst == src) {
                // Skip the copy if we are the same allocation. This can arise from
                // our Bitmap optimization, where we share the same storage.
                drv->uploadDeferred = true;
                return;
            }

            for (uint32_t line = yoff; line < (yoff + h); line++) {
                if (alloc->mHal.state.hasReferences) {
                    alloc->incRefs(src, w);
                    alloc->decRefs(dst, w);
                }
                memcpy(dst, src, lineSize);
                src += stride;
                dst += alloc->mHal.drvState.lod[lod].stride;
            }
        }
        drv->uploadDeferred = true;
    }
}

void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         void *data, size_t sizeBytes) {
    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    const uint8_t *ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    if (data != ptr) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        memcpy(data, ptr, count * eSize);
    }
}

void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            return;
        }

        for (uint32_t line = yoff; line < (yoff + h); line++) {
            memcpy(dst, src, lineSize);
            dst += stride;
            src += alloc->mHal.drvState.lod[lod].stride;
        }
    } else {
        ALOGE("Add code to readback from non-script memory");
    }
}


void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod,
                         uint32_t w, uint32_t h, uint32_t d, void *data, size_t sizeBytes, size_t stride) {
    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        for (uint32_t z = zoff; z < (d + zoff); z++) {
            const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, z, lod,
                                              RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            if (dst == src) {
                // Skip the copy if we are the same allocation. This can arise from
                // our Bitmap optimization, where we share the same storage.
                return;
            }

            for (uint32_t line = yoff; line < (yoff + h); line++) {
                memcpy(dst, src, lineSize);
                dst += stride;
                src += alloc->mHal.drvState.lod[lod].stride;
            }
        }
    }
}

void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
                           const android::renderscript::Allocation *alloc) {
    return alloc->mHal.drvState.lod[0].mallocPtr;
}

void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
                           const android::renderscript::Allocation *alloc) {

}

void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstLod, size_t count,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcLod) {
}


void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                                      RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                                      RsAllocationCubemapFace srcFace) {
    size_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t i = 0; i < h; i++) {
        uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, 0, dstLod, dstFace);
        uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, 0, srcLod, srcFace);
        memcpy(dstPtr, srcPtr, w * elementSize);

        //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
        //      dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
    }
}

void rsdAllocationData3D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff, uint32_t dstLod,
                                      uint32_t w, uint32_t h, uint32_t d,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff, uint32_t srcLod) {
    uint32_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t j = 0; j < d; j++) {
        for (uint32_t i = 0; i < h; i++) {
            uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstZoff + j,
                                           dstLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcZoff + j,
                                           srcLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            memcpy(dstPtr, srcPtr, w * elementSize);

            //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
            //      dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
        }
    }
}

void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                               RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                               RsAllocationCubemapFace srcFace) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
                                     dstLod, dstFace, w, h, srcAlloc,
                                     srcXoff, srcYoff, srcLod, srcFace);
}

void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
                               uint32_t dstLod,
                               uint32_t w, uint32_t h, uint32_t d,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
                               uint32_t srcLod) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData3D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff, dstZoff,
                                     dstLod, w, h, d, srcAlloc,
                                     srcXoff, srcYoff, srcZoff, srcLod);
}

void rsdAllocationElementData(const Context *rsc, const Allocation *alloc,
                              uint32_t x, uint32_t y, uint32_t z,
                              const void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint8_t *ptr = GetOffsetPtr(alloc, x, y, z, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element *e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

void rsdAllocationElementRead(const Context *rsc, const Allocation *alloc,
                              uint32_t x, uint32_t y, uint32_t z,
                              void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint8_t *ptr = GetOffsetPtr(alloc, x, y, z, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element *e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    memcpy(data, ptr, sizeBytes);
}

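// The mip* helpers downsample one LOD into the next by averaging each 2x2
// block of source pixels (a box filter) at the respective bit depth.
// Illustrative example (assumed values): with 8-bit inputs 10, 20, 30, 40,
// mip8 writes (10 + 20 + 30 + 40) * 0.25 = 25.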
static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y = 0; y < h; y++) {
        uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
        const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
        const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);

        for (uint32_t x = 0; x < w; x++) {
            *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
            oPtr++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y = 0; y < h; y++) {
        uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
        const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
        const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);

        for (uint32_t x = 0; x < w; x++) {
            *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
            oPtr++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y = 0; y < h; y++) {
        uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
        const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
        const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);

        for (uint32_t x = 0; x < w; x++) {
            *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
            oPtr++;
            i1 += 2;
            i2 += 2;
        }
    }
}

void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }
    uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
    for (uint32_t face = 0; face < numFaces; face++) {
        for (uint32_t lod = 0; lod < (alloc->getType()->getLODCount() - 1); lod++) {
            switch (alloc->getType()->getElement()->getSizeBits()) {
            case 32:
                mip8888(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 16:
                mip565(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 8:
                mip8(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            }
        }
    }
}

uint32_t rsdAllocationGrallocBits(const android::renderscript::Context *rsc,
                                  android::renderscript::Allocation *alloc) {
    return 0;
}

void rsdAllocationUpdateCachedObject(const Context *rsc,
                                     const Allocation *alloc,
                                     rs_allocation *obj) {
    obj->p = alloc;
#ifdef __LP64__
    obj->unused1 = nullptr;
    obj->unused2 = nullptr;
    obj->unused3 = nullptr;
#endif
}