1 /*
2  * Copyright (C) 2013 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "rsdAllocation.h"
18 #include "rsdCore.h"
19 
20 #include <android/native_window.h>
21 
22 #ifdef RS_COMPATIBILITY_LIB
23 #include "rsCompatibilityLib.h"
24 #else
25 #include "rsdFrameBufferObj.h"
26 #include <vndk/window.h>
27 
28 #include <GLES/gl.h>
29 #include <GLES2/gl2.h>
30 #include <GLES/glext.h>
31 #endif
32 
33 #include <unistd.h> // for close()
34 
35 using android::renderscript::Allocation;
36 using android::renderscript::Context;
37 using android::renderscript::Element;
38 using android::renderscript::Type;
39 using android::renderscript::rs_allocation;
40 using android::renderscript::rsBoxFilter565;
41 using android::renderscript::rsBoxFilter8888;
42 using android::renderscript::rsMax;
43 using android::renderscript::rsRound;
44 
45 #ifndef RS_COMPATIBILITY_LIB
// Order matches the RsAllocationCubemapFace enum: index by face value to get
// the corresponding GL cubemap texture target.
const static GLenum gFaceOrder[] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
54 
rsdTypeToGLType(RsDataType t)55 GLenum rsdTypeToGLType(RsDataType t) {
56     switch (t) {
57     case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
58     case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
59     case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;
60 
61     //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
62     case RS_TYPE_FLOAT_32:      return GL_FLOAT;
63     case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
64     case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
65     case RS_TYPE_SIGNED_8:      return GL_BYTE;
66     case RS_TYPE_SIGNED_16:     return GL_SHORT;
67     default:    break;
68     }
69     return 0;
70 }
71 
rsdKindToGLFormat(RsDataKind k)72 GLenum rsdKindToGLFormat(RsDataKind k) {
73     switch (k) {
74     case RS_KIND_PIXEL_L: return GL_LUMINANCE;
75     case RS_KIND_PIXEL_A: return GL_ALPHA;
76     case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
77     case RS_KIND_PIXEL_RGB: return GL_RGB;
78     case RS_KIND_PIXEL_RGBA: return GL_RGBA;
79     case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
80     default: break;
81     }
82     return 0;
83 }
84 #endif
85 
GetOffsetPtr(const android::renderscript::Allocation * alloc,uint32_t xoff,uint32_t yoff,uint32_t zoff,uint32_t lod,RsAllocationCubemapFace face)86 uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
87                       uint32_t xoff, uint32_t yoff, uint32_t zoff,
88                       uint32_t lod, RsAllocationCubemapFace face) {
89     uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
90     ptr += face * alloc->mHal.drvState.faceOffset;
91     ptr += zoff * alloc->mHal.drvState.lod[lod].dimY * alloc->mHal.drvState.lod[lod].stride;
92     ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
93     ptr += xoff * alloc->mHal.state.elementSizeBytes;
94     return ptr;
95 }
96 
97 
// Copy a rectangular region of host memory into the allocation's GL texture
// via glTexSubImage2D.  Compiled out (no-op) in vendor/compatibility builds,
// which have no GL path.
static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
                            uint32_t xoff, uint32_t yoff, uint32_t lod,
                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(drv->textureID);
    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    // Rows in the source buffer are tightly packed.
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    GLenum t = GL_TEXTURE_2D;
    if (alloc->mHal.state.hasFaces) {
        // Cubemap: select the GL target for the requested face.
        t = gFaceOrder[face];
    }
    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
#endif
}
114 
115 
116 #if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
// Upload every mip level (and cubemap face, if present) of the allocation's
// host buffer into its GL texture.  glTexImage2D is used on the first upload
// (which also defines the GL storage); glTexSubImage2D afterwards.
static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    // Host rows are tightly packed.
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);

    uint32_t faceCount = 1;
    if (alloc->mHal.state.hasFaces) {
        faceCount = 6;
    }

    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    for (uint32_t face = 0; face < faceCount; face ++) {
        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, 0, lod, (RsAllocationCubemapFace)face);

            GLenum t = GL_TEXTURE_2D;
            if (alloc->mHal.state.hasFaces) {
                t = gFaceOrder[face];
            }

            if (isFirstUpload) {
                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
                             alloc->mHal.state.type->getLODDimX(lod),
                             alloc->mHal.state.type->getLODDimY(lod),
                             0, drv->glFormat, drv->glType, p);
            } else {
                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
                                alloc->mHal.state.type->getLODDimX(lod),
                                alloc->mHal.state.type->getLODDimY(lod),
                                drv->glFormat, drv->glType, p);
            }
        }
    }

    // Let GL derive the mip chain when mipmaps are regenerated on sync.
    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    }
    rsdGLCheckError(rsc, "Upload2DTexture");
}
157 #endif
158 
// Sync an allocation's host-side data into its GL texture, generating the
// texture name on first use.  For IO_INPUT allocations only the name is
// created here; the content arrives through the surface path instead.
static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        if (!drv->textureID) {
            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        }
        return;
    }

    // Nothing to upload if the element has no GL representation...
    if (!drv->glType || !drv->glFormat) {
        return;
    }

    // ...or if no host-side buffer exists yet.
    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }

    bool isFirstUpload = false;

    if (!drv->textureID) {
        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        isFirstUpload = true;
    }

    Upload2DTexture(rsc, alloc, isFirstUpload);

    // Texture-only allocations no longer need the host copy once uploaded.
    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
            alloc->mHal.drvState.lod[0].mallocPtr = nullptr;
        }
    }
    rsdGLCheckError(rsc, "UploadToTexture");
#endif
}
196 
// Create (once) a GL renderbuffer backing this allocation for use as a
// render target.  No-op when the element has no GL format.
static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (!drv->glFormat) {
        return;
    }

    if (!drv->renderTargetID) {
        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);

        if (!drv->renderTargetID) {
            // This should generally not happen
            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
            rsc->dumpDebug();
            return;
        }
        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    }
    rsdGLCheckError(rsc, "AllocateRenderTarget");
#endif
}
221 
// Copy a 1D allocation's contents into its GL buffer object, generating the
// buffer name on first use.  Asserts that the allocation really is 1D.
static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(!alloc->mHal.state.type->getDimY());
    rsAssert(!alloc->mHal.state.type->getDimZ());

    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;

    if (!drv->bufferID) {
        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    }
    if (!drv->bufferID) {
        ALOGE("Upload to buffer object failed");
        // Leave the allocation marked dirty so we retry on the next sync.
        drv->uploadDeferred = true;
        return;
    }
    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    RSD_CALL_GL(glBufferData, drv->glTarget,
                alloc->mHal.state.type->getPackedSizeBytes(),
                alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    rsdGLCheckError(rsc, "UploadToBufferObject");
#endif
}
247 
248 
// Describe the plane layout of a YUV allocation by stashing chroma plane
// geometry/pointers in LOD slots 1 and 2 (YUV is 2D-only, so the mip slots
// are free).  Returns the combined byte size of the two chroma planes.
static size_t DeriveYUVLayout(int yuv, Allocation::Hal::DrvState *state) {
#ifndef RS_COMPATIBILITY_LIB
    // For the flexible YCbCr format, layout is initialized during call to
    // Allocation::ioReceive.  Return early and avoid clobberring any
    // pre-existing layout.
    if (yuv == RS_YUV_420_888) {
        return 0;
    }
#endif

    // YUV only supports basic 2d
    // so we can stash the plane pointers in the mipmap levels.
    size_t uvSize = 0;
    // 4:2:0 subsampling: chroma planes are half the luma size in both axes.
    state->lod[1].dimX = state->lod[0].dimX / 2;
    state->lod[1].dimY = state->lod[0].dimY / 2;
    state->lod[2].dimX = state->lod[0].dimX / 2;
    state->lod[2].dimY = state->lod[0].dimY / 2;
    state->yuv.shift = 1;
    state->yuv.step = 1;
    state->lodCount = 3;

    switch(yuv) {
    case RS_YUV_YV12:
        // Planar layout: Y plane, then lod[2]'s plane, then lod[1]'s plane;
        // chroma strides rounded up to a 16-byte boundary.
        state->lod[2].stride = rsRound(state->lod[0].stride >> 1, 16);
        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        uvSize += state->lod[2].stride * state->lod[2].dimY;

        state->lod[1].stride = state->lod[2].stride;
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) +
                (state->lod[2].stride * state->lod[2].dimY);
        // NOTE(review): this uses lod[2].dimY; it equals lod[1].dimY (both
        // set to dimY/2 above) so the value is the same, but lod[1].dimY
        // would read more clearly.
        uvSize += state->lod[1].stride * state->lod[2].dimY;
        break;
    case RS_YUV_NV21:
        // Semi-planar layout: Y plane followed by interleaved chroma; the
        // two chroma "planes" alias the same region one byte apart, and
        // step = 2 walks the interleaved samples.
        //state->lod[1].dimX = state->lod[0].dimX;
        state->lod[1].stride = state->lod[0].stride;
        state->lod[2].stride = state->lod[0].stride;
        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) + 1;
        uvSize += state->lod[1].stride * state->lod[1].dimY;
        state->yuv.step = 2;
        break;
    default:
        rsAssert(0);
    }
    return uvSize;
}
297 
// Fill in alloc->mHal.drvState (dims, strides, per-LOD/per-face pointers)
// for the given type, basing all pointers at 'ptr'.  May be called with
// ptr == nullptr purely to compute the required allocation size, which is
// returned in bytes.
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
                                          const Type *type, uint8_t *ptr, size_t requiredAlignment) {
    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    alloc->mHal.drvState.lod[0].mallocPtr = 0;
    // Stride needs to be aligned to a boundary defined by requiredAlignment!
    size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    alloc->mHal.drvState.lod[0].stride = rsRound(stride, requiredAlignment);
    alloc->mHal.drvState.lodCount = type->getLODCount();
    alloc->mHal.drvState.faceCount = type->getDimFaces();

    // Byte offset of each LOD from the base pointer.
    size_t offsets[Allocation::MAX_LOD];
    memset(offsets, 0, sizeof(offsets));

    // 'o' accumulates the total size of everything laid out so far.
    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
            rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    if (alloc->mHal.state.yuv) {
        // YUV reuses the LOD slots for chroma planes.  With lod[0].mallocPtr
        // still 0, DeriveYUVLayout's "pointers" are really offsets; record
        // them so they can be rebased onto 'ptr' below.
        o += DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);

        for (uint32_t ct = 1; ct < alloc->mHal.drvState.lodCount; ct++) {
            offsets[ct] = (size_t)alloc->mHal.drvState.lod[ct].mallocPtr;
        }
    } else if(alloc->mHal.drvState.lodCount > 1) {
        // Mipmap chain: halve each dimension (never below 1) per level.
        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
        for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
            alloc->mHal.drvState.lod[lod].dimX = tx;
            alloc->mHal.drvState.lod[lod].dimY = ty;
            alloc->mHal.drvState.lod[lod].dimZ = tz;
            alloc->mHal.drvState.lod[lod].stride =
                    rsRound(tx * type->getElementSizeBytes(), requiredAlignment);
            offsets[lod] = o;
            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
            if (tx > 1) tx >>= 1;
            if (ty > 1) ty >>= 1;
            if (tz > 1) tz >>= 1;
        }
    }

    // One full LOD chain per cubemap face.
    alloc->mHal.drvState.faceOffset = o;

    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    }

    size_t allocSize = alloc->mHal.drvState.faceOffset;
    if(alloc->mHal.drvState.faceCount) {
        allocSize *= 6;
    }

    return allocSize;
}
353 
// Convenience overload using the default RenderScript row alignment.
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
                                          const Type *type, uint8_t *ptr) {
    return AllocationBuildPointerTable(rsc, alloc, type, ptr, Allocation::kMinimumRSAlignment);
}
358 
// Allocate allocSize bytes aligned to requiredAlignment, optionally
// zero-filled.  Returns nullptr on allocation failure.
static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero, size_t requiredAlignment) {
    uint8_t *buf = (uint8_t *)memalign(requiredAlignment, allocSize);
    if (buf != nullptr && forceZero) {
        memset(buf, 0, allocSize);
    }
    return buf;
}
370 
rsdAllocationInitStrided(const Context * rsc,Allocation * alloc,bool forceZero,size_t requiredAlignment)371 bool rsdAllocationInitStrided(const Context *rsc, Allocation *alloc, bool forceZero, size_t requiredAlignment) {
372     DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
373     if (!drv) {
374         return false;
375     }
376     alloc->mHal.drv = drv;
377 
378     // Check if requiredAlignment is power of 2, also requiredAlignment should be larger or equal than kMinimumRSAlignment.
379     if ((requiredAlignment & (requiredAlignment-1)) != 0 || requiredAlignment < Allocation::kMinimumRSAlignment) {
380         ALOGE("requiredAlignment must be power of 2");
381         return false;
382     }
383     // Calculate the object size.
384     size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), nullptr, requiredAlignment);
385 
386     uint8_t * ptr = nullptr;
387     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {
388 
389     } else if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
390         // Allocation is allocated when the surface is created
391         // in getSurface
392 #ifdef RS_COMPATIBILITY_LIB
393     } else if (alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_INCREMENTAL_SUPPORT | RS_ALLOCATION_USAGE_SHARED)) {
394         if (alloc->mHal.state.userProvidedPtr == nullptr) {
395             ALOGE("User-backed buffer pointer cannot be null");
396             return false;
397         }
398         if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
399             ALOGE("User-allocated buffers must not have multiple faces or LODs");
400             return false;
401         }
402 
403         drv->useUserProvidedPtr = true;
404         ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
405 #endif
406     } else if (alloc->mHal.state.userProvidedPtr != nullptr) {
407         // user-provided allocation
408         // limitations: no faces, no LOD, USAGE_SCRIPT or SCRIPT+TEXTURE only
409         if (!(alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED) ||
410               alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED | RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE))) {
411             ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT | USAGE_SHARED or USAGE_SCRIPT | USAGE_SHARED | USAGE_GRAPHICS_TEXTURE");
412             return false;
413         }
414         if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
415             ALOGE("User-allocated buffers must not have multiple faces or LODs");
416             return false;
417         }
418 
419         // rows must be aligned based on requiredAlignment.
420         // validate that here, otherwise fall back to not use the user-backed allocation
421         if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % requiredAlignment) != 0) {
422             ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
423             drv->useUserProvidedPtr = false;
424 
425             ptr = allocAlignedMemory(allocSize, forceZero, requiredAlignment);
426             if (!ptr) {
427                 alloc->mHal.drv = nullptr;
428                 free(drv);
429                 return false;
430             }
431 
432         } else {
433             drv->useUserProvidedPtr = true;
434             ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
435         }
436     } else {
437         ptr = allocAlignedMemory(allocSize, forceZero, requiredAlignment);
438         if (!ptr) {
439             alloc->mHal.drv = nullptr;
440             free(drv);
441             return false;
442         }
443     }
444     // Build the pointer tables
445     size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr, requiredAlignment);
446     if(allocSize != verifySize) {
447         rsAssert(!"Size mismatch");
448     }
449 
450     drv->glTarget = GL_NONE;
451     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
452         if (alloc->mHal.state.hasFaces) {
453             drv->glTarget = GL_TEXTURE_CUBE_MAP;
454         } else {
455             drv->glTarget = GL_TEXTURE_2D;
456         }
457     } else {
458         if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
459             drv->glTarget = GL_ARRAY_BUFFER;
460         }
461     }
462 
463 #ifndef RS_COMPATIBILITY_LIB
464     drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
465     drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
466 #else
467     drv->glType = 0;
468     drv->glFormat = 0;
469 #endif
470 
471     if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
472         drv->uploadDeferred = true;
473     }
474 
475 #if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
476     drv->readBackFBO = nullptr;
477 #endif
478 
479     // fill out the initial state of the buffer if we couldn't use the user-provided ptr and USAGE_SHARED was accepted
480     if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
481         rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X, alloc->getType()->getDimX(), alloc->getType()->getDimY(), alloc->mHal.state.userProvidedPtr, allocSize, 0);
482     }
483 
484 
485 #ifdef RS_FIND_OFFSETS
486     ALOGE("pointer for allocation: %p", alloc);
487     ALOGE("pointer for allocation.drv: %p", &alloc->mHal.drv);
488 #endif
489 
490 
491     return true;
492 }
493 
// Default-alignment entry point; see rsdAllocationInitStrided.
bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
    return rsdAllocationInitStrided(rsc, alloc, forceZero, Allocation::kMinimumRSAlignment);
}
497 
// Rebuild an adapter allocation's LOD table so each level aliases the
// appropriate window (origin X/Y/Z, LOD, face) of its base allocation.
void rsdAllocationAdapterOffset(const Context *rsc, const Allocation *alloc) {
    //ALOGE("rsdAllocationAdapterOffset");

    // Get a base pointer to the new LOD
    const Allocation *base = alloc->mHal.state.baseAlloc;
    const Type *type = alloc->mHal.state.type;
    if (base == nullptr) {
        return;
    }

    //ALOGE("rsdAllocationAdapterOffset  %p  %p", ptrA, ptrB);
    //ALOGE("rsdAllocationAdapterOffset  lodCount %i", alloc->mHal.drvState.lodCount);

    const int lodBias = alloc->mHal.state.originLOD;
    uint32_t lodCount = rsMax(alloc->mHal.drvState.lodCount, (uint32_t)1);
    for (uint32_t lod=0; lod < lodCount; lod++) {
        // Copy the base level's geometry, then rebase its pointer at the
        // adapter's origin.
        // NOTE(review): GetOffsetPtr is passed the constant 'lodBias' rather
        // than 'lod + lodBias' on every iteration — confirm this is
        // intentional before relying on adapters with more than one LOD.
        alloc->mHal.drvState.lod[lod] = base->mHal.drvState.lod[lod + lodBias];
        alloc->mHal.drvState.lod[lod].mallocPtr = GetOffsetPtr(alloc,
                      alloc->mHal.state.originX, alloc->mHal.state.originY, alloc->mHal.state.originZ,
                      lodBias, (RsAllocationCubemapFace)alloc->mHal.state.originFace);
    }
}
520 
rsdAllocationAdapterInit(const Context * rsc,Allocation * alloc)521 bool rsdAllocationAdapterInit(const Context *rsc, Allocation *alloc) {
522     DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
523     if (!drv) {
524         return false;
525     }
526     alloc->mHal.drv = drv;
527 
528     // We need to build an allocation that looks like a subset of the parent allocation
529     rsdAllocationAdapterOffset(rsc, alloc);
530 
531     return true;
532 }
533 
// Tear down driver state for an allocation: GL objects, the malloc'd backing
// store (when owned by us), and any attached IO-output surface.
void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    // Adapter allocations alias their base allocation's resources, so only
    // the base (baseAlloc == nullptr) releases anything.
    if (alloc->mHal.state.baseAlloc == nullptr) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
        if (drv->bufferID) {
            // Causes a SW crash....
            //ALOGV(" mBufferID %i", mBufferID);
            //glDeleteBuffers(1, &mBufferID);
            //mBufferID = 0;
        }
        if (drv->textureID) {
            RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
            drv->textureID = 0;
        }
        if (drv->renderTargetID) {
            RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
            drv->renderTargetID = 0;
        }
#endif

        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            // don't free user-allocated ptrs or IO_OUTPUT buffers
            if (!(drv->useUserProvidedPtr) &&
                !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) &&
                !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
                    free(alloc->mHal.drvState.lod[0].mallocPtr);
            }
            alloc->mHal.drvState.lod[0].mallocPtr = nullptr;
        }

#ifndef RS_COMPATIBILITY_LIB
#ifndef RS_VENDOR_LIB
        if (drv->readBackFBO != nullptr) {
            delete drv->readBackFBO;
            drv->readBackFBO = nullptr;
        }
#endif
        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
            (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
            ANativeWindow *nw = drv->wndSurface;
            if (nw) {
                //If we have an attached surface, need to release it.
                // Unlock the CPU-locked buffer, hand it back to the window
                // unqueued, then drop our reference on the window itself.
                AHardwareBuffer* ahwb = ANativeWindowBuffer_getHardwareBuffer(drv->wndBuffer);
                int fenceID = -1;
                AHardwareBuffer_unlock(ahwb, &fenceID);
                ANativeWindow_cancelBuffer(nw, drv->wndBuffer, fenceID);
                ANativeWindow_release(nw);
                drv->wndSurface = nullptr;
                drv->wndBuffer = nullptr;
            }
        }
#endif
    }

    free(drv);
    alloc->mHal.drv = nullptr;
}
592 
rsdAllocationResize(const Context * rsc,const Allocation * alloc,const Type * newType,bool zeroNew)593 void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
594                          const Type *newType, bool zeroNew) {
595     const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
596     const uint32_t dimX = newType->getDimX();
597 
598     // can't resize Allocations with user-allocated buffers
599     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
600         ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
601         return;
602     }
603     void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
604     // Calculate the object size
605     size_t s = AllocationBuildPointerTable(rsc, alloc, newType, nullptr);
606     uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
607     // Build the relative pointer tables.
608     size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
609     if(s != verifySize) {
610         rsAssert(!"Size mismatch");
611     }
612 
613 
614     if (dimX > oldDimX) {
615         size_t stride = alloc->mHal.state.elementSizeBytes;
616         memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
617                  0, stride * (dimX - oldDimX));
618     }
619 }
620 
// Read back a render-target allocation's pixels from its FBO into the
// host-side buffer via glReadPixels, creating a read-back FBO on first use
// and restoring the previously bound framebuffer afterwards.
static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
#if !defined(RS_VENDOR_LIB) && !defined(RS_COMPATIBILITY_LIB)
    if (!alloc->getIsScript()) {
        return; // nothing to sync
    }

    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;

    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!drv->textureID && !drv->renderTargetID) {
        return; // nothing was rendered here yet, so nothing to sync
    }
    if (drv->readBackFBO == nullptr) {
        drv->readBackFBO = new RsdFrameBufferObj();
        drv->readBackFBO->setColorTarget(drv, 0);
        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
                                        alloc->getType()->getDimY());
    }

    // Bind the framebuffer object so we can read back from it
    drv->readBackFBO->setActive(rsc);

    // Do the readback
    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
                alloc->mHal.drvState.lod[0].dimY,
                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);

    // Revert framebuffer to its original
    lastFbo->setActive(rsc);
#endif
}
653 
654 
rsdAllocationSyncAll(const Context * rsc,const Allocation * alloc,RsAllocationUsageType src)655 void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
656                          RsAllocationUsageType src) {
657     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
658 
659     if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
660         if(!alloc->getIsRenderTarget()) {
661             rsc->setError(RS_ERROR_FATAL_DRIVER,
662                           "Attempting to sync allocation from render target, "
663                           "for non-render target allocation");
664         } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
665             rsc->setError(RS_ERROR_FATAL_DRIVER, "Cannot only sync from RGBA"
666                                                  "render target");
667         } else {
668             rsdAllocationSyncFromFBO(rsc, alloc);
669         }
670         return;
671     }
672 
673     rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT || src == RS_ALLOCATION_USAGE_SHARED);
674 
675     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
676         UploadToTexture(rsc, alloc);
677     } else {
678         if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
679             !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
680             AllocateRenderTarget(rsc, alloc);
681         }
682     }
683     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
684         UploadToBufferObject(rsc, alloc);
685     }
686 
687     if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
688 
689         if (src == RS_ALLOCATION_USAGE_SHARED) {
690             // just a memory fence for the CPU driver
691             // vendor drivers probably want to flush any dirty cachelines for
692             // this particular Allocation
693             __sync_synchronize();
694         }
695     }
696 
697     drv->uploadDeferred = false;
698 }
699 
// Flag the allocation so its GL-side copies are re-uploaded on the next sync.
void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    drv->uploadDeferred = true;
}
704 
705 #ifndef RS_COMPATIBILITY_LIB
// Dequeue and CPU-lock the next buffer from the IO-output window, pointing
// lod[0] at the locked pixels.  Returns false (with an RS error set) on
// failure.
static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    // Must lock the whole surface
    int fenceID = -1;
    int r = ANativeWindow_dequeueBuffer(nw, &drv->wndBuffer, &fenceID);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error dequeueing IO output buffer.");
        // NOTE(review): fenceID may still be -1 here; close(-1) fails
        // harmlessly with EBADF.
        close(fenceID);
        return false;
    }

    void *dst = nullptr;
    AHardwareBuffer* ahwb = ANativeWindowBuffer_getHardwareBuffer(drv->wndBuffer);
    // Lock waits on (and consumes) the acquire fence returned by dequeue.
    r = AHardwareBuffer_lock(ahwb, AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
                             fenceID, NULL, &dst);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error Locking IO output buffer.");
        // NOTE(review): the dequeued buffer is not cancelled back to the
        // window on this path — confirm whether this leaks a buffer slot.
        return false;
    }
    alloc->mHal.drvState.lod[0].mallocPtr = dst;
    // Window stride is in pixels; convert to bytes.
    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
    rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);

    return true;
}
731 #endif
732 
rsdAllocationSetSurface(const Context * rsc,Allocation * alloc,ANativeWindow * nw)733 void rsdAllocationSetSurface(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
734 #ifndef RS_COMPATIBILITY_LIB
735     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
736 
737     // Cleanup old surface if there is one.
738     if (drv->wndSurface) {
739         ANativeWindow *old = drv->wndSurface;
740         AHardwareBuffer* ahwb = ANativeWindowBuffer_getHardwareBuffer(drv->wndBuffer);
741         int fenceID = -1;
742         int32_t r = AHardwareBuffer_unlock(ahwb, &fenceID);
743         if (r) {
744             rsc->setError(RS_ERROR_DRIVER, "Error unlocking output buffer.");
745             close(fenceID);
746             return;
747         }
748         r = ANativeWindow_cancelBuffer(old, drv->wndBuffer, fenceID);
749         if (r) {
750             rsc->setError(RS_ERROR_DRIVER, "Error canceling output buffer.");
751             return;
752         }
753         ANativeWindow_release(old);
754         drv->wndSurface = nullptr;
755         drv->wndBuffer = nullptr;
756     }
757 
758     if (nw) {
759         int32_t r = ANativeWindow_setBuffersGeometry(nw, alloc->mHal.drvState.lod[0].dimX,
760                                                  alloc->mHal.drvState.lod[0].dimY,
761                                                  WINDOW_FORMAT_RGBA_8888);
762         if (r) {
763             rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer geometry.");
764             return;
765         }
766 
767         if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
768             r = ANativeWindow_setUsage(nw,
769                     AHARDWAREBUFFER_USAGE_CPU_READ_RARELY | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN);
770             if (r) {
771                 rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
772                 return;
773             }
774         }
775 
776         IoGetBuffer(rsc, alloc, nw);
777         drv->wndSurface = nw;
778     }
779 
780     return;
781 #endif
782 }
783 
void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
    // Present the current contents of an IO_OUTPUT allocation: for GL render
    // targets this swaps the EGL surface; otherwise it unlocks and queues the
    // current window buffer, then immediately dequeues/locks the next one so
    // scripts can keep writing.
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *nw = drv->wndSurface;
#ifndef RS_VENDOR_LIB
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        // Render targets are presented through EGL, not the buffer queue.
        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
        return;
    }
#endif
    if (nw) {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            // Drop our CPU lock; unlock hands back a release fence fd which
            // queueBuffer then takes ownership of.
            AHardwareBuffer* ahwb = ANativeWindowBuffer_getHardwareBuffer(drv->wndBuffer);
            int fenceID = -1;
            int32_t r = AHardwareBuffer_unlock(ahwb, &fenceID);
            if (r) {
                rsc->setError(RS_ERROR_DRIVER, "Error unlock output buffer.");
                close(fenceID);
                return;
            }
            r = ANativeWindow_queueBuffer(nw, drv->wndBuffer, fenceID);
            if (r) {
                rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
                return;
            }
            drv->wndBuffer = nullptr;
            // Re-arm with the next buffer; IoGetBuffer reports its own errors.
            IoGetBuffer(rsc, alloc, nw);
        }
    } else {
        rsc->setError(RS_ERROR_DRIVER, "Sent IO buffer with no attached surface.");
        return;
    }
#endif
}
819 
rsdAllocationIoReceive(const Context * rsc,Allocation * alloc)820 void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
821     if (alloc->mHal.state.yuv) {
822         DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);
823     }
824 }
825 
826 
rsdAllocationData1D(const Context * rsc,const Allocation * alloc,uint32_t xoff,uint32_t lod,size_t count,const void * data,size_t sizeBytes)827 void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
828                          uint32_t xoff, uint32_t lod, size_t count,
829                          const void *data, size_t sizeBytes) {
830     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
831 
832     const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
833     uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
834     size_t size = count * eSize;
835     if (ptr != data) {
836         // Skip the copy if we are the same allocation. This can arise from
837         // our Bitmap optimization, where we share the same storage.
838         if (alloc->mHal.state.hasReferences) {
839             alloc->incRefs(data, count);
840             alloc->decRefs(ptr, count);
841         }
842         memcpy(ptr, data, size);
843     }
844     drv->uploadDeferred = true;
845 }
846 
void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    // Copies a w x h rect from 'data' into the allocation at (xoff, yoff) of
    // the given lod/face. For YUV allocations the chroma planes (stored in
    // the following lod slots) are copied as well. Marks the allocation for
    // deferred upload; falls back to a GL texture update when there is no
    // CPU mapping.
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    // A zero stride means the source rows are tightly packed.
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            drv->uploadDeferred = true;
            return;
        }

        // Copy the base plane one row at a time (source and destination may
        // have different strides).
        for (uint32_t line=yoff; line < (yoff+h); line++) {
            if (alloc->mHal.state.hasReferences) {
                // Keep refcounts of object-typed elements consistent.
                alloc->incRefs(src, w);
                alloc->decRefs(dst, w);
            }
            memcpy(dst, src, lineSize);
            src += stride;
            dst += alloc->mHal.drvState.lod[lod].stride;
        }
        if (alloc->mHal.state.yuv) {
            // Chroma planes are half-height and addressed via lod slots 1-2.
            size_t clineSize = lineSize;
            int lod = 1;
            int maxLod = 2;
            if (alloc->mHal.state.yuv == RS_YUV_YV12) {
                // YV12: two half-width planes, copied from lods 1 and 2.
                maxLod = 3;
                clineSize >>= 1;
            } else if (alloc->mHal.state.yuv == RS_YUV_NV21) {
                // NV21: a single interleaved chroma plane, copied from lod 2.
                lod = 2;
                maxLod = 3;
            }

            while (lod < maxLod) {
                uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);

                for (uint32_t line=(yoff >> 1); line < ((yoff+h)>>1); line++) {
                    memcpy(dst, src, clineSize);
                    // When copying from an array to an Allocation, the src pointer
                    // to the array should just move by the number of bytes copied.
                    src += clineSize;
                    dst += alloc->mHal.drvState.lod[lod].stride;
                }
                lod++;
            }

        }
        drv->uploadDeferred = true;
    } else {
        // No CPU-side storage; write the data straight into the GL texture.
        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    }
}
908 
rsdAllocationData3D(const Context * rsc,const Allocation * alloc,uint32_t xoff,uint32_t yoff,uint32_t zoff,uint32_t lod,uint32_t w,uint32_t h,uint32_t d,const void * data,size_t sizeBytes,size_t stride)909 void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
910                          uint32_t xoff, uint32_t yoff, uint32_t zoff,
911                          uint32_t lod,
912                          uint32_t w, uint32_t h, uint32_t d, const void *data,
913                          size_t sizeBytes, size_t stride) {
914     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
915 
916     uint32_t eSize = alloc->mHal.state.elementSizeBytes;
917     uint32_t lineSize = eSize * w;
918     if (!stride) {
919         stride = lineSize;
920     }
921 
922     if (alloc->mHal.drvState.lod[0].mallocPtr) {
923         const uint8_t *src = static_cast<const uint8_t *>(data);
924         for (uint32_t z = zoff; z < (d + zoff); z++) {
925             uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, z, lod,
926                                         RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
927             if (dst == src) {
928                 // Skip the copy if we are the same allocation. This can arise from
929                 // our Bitmap optimization, where we share the same storage.
930                 drv->uploadDeferred = true;
931                 return;
932             }
933 
934             for (uint32_t line=yoff; line < (yoff+h); line++) {
935                 if (alloc->mHal.state.hasReferences) {
936                     alloc->incRefs(src, w);
937                     alloc->decRefs(dst, w);
938                 }
939                 memcpy(dst, src, lineSize);
940                 src += stride;
941                 dst += alloc->mHal.drvState.lod[lod].stride;
942             }
943         }
944         drv->uploadDeferred = true;
945     }
946 }
947 
rsdAllocationRead1D(const Context * rsc,const Allocation * alloc,uint32_t xoff,uint32_t lod,size_t count,void * data,size_t sizeBytes)948 void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
949                          uint32_t xoff, uint32_t lod, size_t count,
950                          void *data, size_t sizeBytes) {
951     const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
952     const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
953     if (data != ptr) {
954         // Skip the copy if we are the same allocation. This can arise from
955         // our Bitmap optimization, where we share the same storage.
956         memcpy(data, ptr, count * eSize);
957     }
958 }
959 
rsdAllocationRead2D(const Context * rsc,const Allocation * alloc,uint32_t xoff,uint32_t yoff,uint32_t lod,RsAllocationCubemapFace face,uint32_t w,uint32_t h,void * data,size_t sizeBytes,size_t stride)960 void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
961                                 uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
962                                 uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
963     size_t eSize = alloc->mHal.state.elementSizeBytes;
964     size_t lineSize = eSize * w;
965     if (!stride) {
966         stride = lineSize;
967     }
968 
969     if (alloc->mHal.drvState.lod[0].mallocPtr) {
970         uint8_t *dst = static_cast<uint8_t *>(data);
971         const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
972         if (dst == src) {
973             // Skip the copy if we are the same allocation. This can arise from
974             // our Bitmap optimization, where we share the same storage.
975             return;
976         }
977 
978         for (uint32_t line=yoff; line < (yoff+h); line++) {
979             memcpy(dst, src, lineSize);
980             dst += stride;
981             src += alloc->mHal.drvState.lod[lod].stride;
982         }
983     } else {
984         ALOGE("Add code to readback from non-script memory");
985     }
986 }
987 
988 
rsdAllocationRead3D(const Context * rsc,const Allocation * alloc,uint32_t xoff,uint32_t yoff,uint32_t zoff,uint32_t lod,uint32_t w,uint32_t h,uint32_t d,void * data,size_t sizeBytes,size_t stride)989 void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
990                          uint32_t xoff, uint32_t yoff, uint32_t zoff,
991                          uint32_t lod,
992                          uint32_t w, uint32_t h, uint32_t d, void *data, size_t sizeBytes, size_t stride) {
993     uint32_t eSize = alloc->mHal.state.elementSizeBytes;
994     uint32_t lineSize = eSize * w;
995     if (!stride) {
996         stride = lineSize;
997     }
998 
999     if (alloc->mHal.drvState.lod[0].mallocPtr) {
1000         uint8_t *dst = static_cast<uint8_t *>(data);
1001         for (uint32_t z = zoff; z < (d + zoff); z++) {
1002             const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, z, lod,
1003                                               RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1004             if (dst == src) {
1005                 // Skip the copy if we are the same allocation. This can arise from
1006                 // our Bitmap optimization, where we share the same storage.
1007                 return;
1008             }
1009 
1010             for (uint32_t line=yoff; line < (yoff+h); line++) {
1011                 memcpy(dst, src, lineSize);
1012                 dst += stride;
1013                 src += alloc->mHal.drvState.lod[lod].stride;
1014             }
1015         }
1016     }
1017 }
1018 
rsdAllocationLock1D(const android::renderscript::Context * rsc,const android::renderscript::Allocation * alloc)1019 void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
1020                           const android::renderscript::Allocation *alloc) {
1021     return alloc->mHal.drvState.lod[0].mallocPtr;
1022 }
1023 
void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {
    // Intentionally empty: rsdAllocationLock1D hands out the persistent
    // lod[0].mallocPtr mapping directly, so there is nothing to release here.
}
1028 
void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstLod, size_t count,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcLod) {
    // Intentionally a no-op in this driver; 1D alloc-to-alloc copies are
    // presumably handled elsewhere in the runtime — TODO confirm against
    // the core Allocation copy paths.
}
1035 
1036 
rsdAllocationData2D_alloc_script(const android::renderscript::Context * rsc,const android::renderscript::Allocation * dstAlloc,uint32_t dstXoff,uint32_t dstYoff,uint32_t dstLod,RsAllocationCubemapFace dstFace,uint32_t w,uint32_t h,const android::renderscript::Allocation * srcAlloc,uint32_t srcXoff,uint32_t srcYoff,uint32_t srcLod,RsAllocationCubemapFace srcFace)1037 void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
1038                                       const android::renderscript::Allocation *dstAlloc,
1039                                       uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
1040                                       RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
1041                                       const android::renderscript::Allocation *srcAlloc,
1042                                       uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
1043                                       RsAllocationCubemapFace srcFace) {
1044     size_t elementSize = dstAlloc->getType()->getElementSizeBytes();
1045     for (uint32_t i = 0; i < h; i ++) {
1046         uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, 0, dstLod, dstFace);
1047         uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, 0, srcLod, srcFace);
1048         memcpy(dstPtr, srcPtr, w * elementSize);
1049 
1050         //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
1051         //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
1052     }
1053 }
1054 
rsdAllocationData3D_alloc_script(const android::renderscript::Context * rsc,const android::renderscript::Allocation * dstAlloc,uint32_t dstXoff,uint32_t dstYoff,uint32_t dstZoff,uint32_t dstLod,uint32_t w,uint32_t h,uint32_t d,const android::renderscript::Allocation * srcAlloc,uint32_t srcXoff,uint32_t srcYoff,uint32_t srcZoff,uint32_t srcLod)1055 void rsdAllocationData3D_alloc_script(const android::renderscript::Context *rsc,
1056                                       const android::renderscript::Allocation *dstAlloc,
1057                                       uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff, uint32_t dstLod,
1058                                       uint32_t w, uint32_t h, uint32_t d,
1059                                       const android::renderscript::Allocation *srcAlloc,
1060                                       uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff, uint32_t srcLod) {
1061     uint32_t elementSize = dstAlloc->getType()->getElementSizeBytes();
1062     for (uint32_t j = 0; j < d; j++) {
1063         for (uint32_t i = 0; i < h; i ++) {
1064             uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstZoff + j,
1065                                            dstLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1066             uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcZoff + j,
1067                                            srcLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1068             memcpy(dstPtr, srcPtr, w * elementSize);
1069 
1070             //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
1071             //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
1072         }
1073     }
1074 }
1075 
rsdAllocationData2D_alloc(const android::renderscript::Context * rsc,const android::renderscript::Allocation * dstAlloc,uint32_t dstXoff,uint32_t dstYoff,uint32_t dstLod,RsAllocationCubemapFace dstFace,uint32_t w,uint32_t h,const android::renderscript::Allocation * srcAlloc,uint32_t srcXoff,uint32_t srcYoff,uint32_t srcLod,RsAllocationCubemapFace srcFace)1076 void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
1077                                const android::renderscript::Allocation *dstAlloc,
1078                                uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
1079                                RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
1080                                const android::renderscript::Allocation *srcAlloc,
1081                                uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
1082                                RsAllocationCubemapFace srcFace) {
1083     if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
1084         rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
1085                                              "yet implemented.");
1086         return;
1087     }
1088     rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
1089                                      dstLod, dstFace, w, h, srcAlloc,
1090                                      srcXoff, srcYoff, srcLod, srcFace);
1091 }
1092 
rsdAllocationData3D_alloc(const android::renderscript::Context * rsc,const android::renderscript::Allocation * dstAlloc,uint32_t dstXoff,uint32_t dstYoff,uint32_t dstZoff,uint32_t dstLod,uint32_t w,uint32_t h,uint32_t d,const android::renderscript::Allocation * srcAlloc,uint32_t srcXoff,uint32_t srcYoff,uint32_t srcZoff,uint32_t srcLod)1093 void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
1094                                const android::renderscript::Allocation *dstAlloc,
1095                                uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
1096                                uint32_t dstLod,
1097                                uint32_t w, uint32_t h, uint32_t d,
1098                                const android::renderscript::Allocation *srcAlloc,
1099                                uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
1100                                uint32_t srcLod) {
1101     if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
1102         rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
1103                                              "yet implemented.");
1104         return;
1105     }
1106     rsdAllocationData3D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff, dstZoff,
1107                                      dstLod, w, h, d, srcAlloc,
1108                                      srcXoff, srcYoff, srcZoff, srcLod);
1109 }
1110 
void rsdAllocationElementData(const Context *rsc, const Allocation *alloc,
                              uint32_t x, uint32_t y, uint32_t z,
                              const void *data, uint32_t cIdx, size_t sizeBytes) {
    // Writes a single sub-element (field cIdx of the element struct) at
    // (x, y, z), then marks the allocation for deferred upload.
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint8_t * ptr = GetOffsetPtr(alloc, x, y, z, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    // Advance to the field's byte offset within the element.
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        // Maintain refcounts if the field holds object types.
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}
1129 
rsdAllocationElementRead(const Context * rsc,const Allocation * alloc,uint32_t x,uint32_t y,uint32_t z,void * data,uint32_t cIdx,size_t sizeBytes)1130 void rsdAllocationElementRead(const Context *rsc, const Allocation *alloc,
1131                               uint32_t x, uint32_t y, uint32_t z,
1132                               void *data, uint32_t cIdx, size_t sizeBytes) {
1133     DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
1134 
1135     uint8_t * ptr = GetOffsetPtr(alloc, x, y, z, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1136 
1137     const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
1138     ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);
1139 
1140     memcpy(data, ptr, sizeBytes);
1141 }
1142 
mip565(const Allocation * alloc,int lod,RsAllocationCubemapFace face)1143 static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
1144     uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
1145     uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
1146 
1147     for (uint32_t y=0; y < h; y++) {
1148         uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
1149         const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, 0, y*2, lod, face);
1150         const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, 0, y*2+1, lod, face);
1151 
1152         for (uint32_t x=0; x < w; x++) {
1153             *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
1154             oPtr ++;
1155             i1 += 2;
1156             i2 += 2;
1157         }
1158     }
1159 }
1160 
mip8888(const Allocation * alloc,int lod,RsAllocationCubemapFace face)1161 static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
1162     uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
1163     uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
1164 
1165     for (uint32_t y=0; y < h; y++) {
1166         uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
1167         const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
1168         const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
1169 
1170         for (uint32_t x=0; x < w; x++) {
1171             *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
1172             oPtr ++;
1173             i1 += 2;
1174             i2 += 2;
1175         }
1176     }
1177 }
1178 
mip8(const Allocation * alloc,int lod,RsAllocationCubemapFace face)1179 static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
1180     uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
1181     uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
1182 
1183     for (uint32_t y=0; y < h; y++) {
1184         uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
1185         const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
1186         const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
1187 
1188         for (uint32_t x=0; x < w; x++) {
1189             *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
1190             oPtr ++;
1191             i1 += 2;
1192             i2 += 2;
1193         }
1194     }
1195 }
1196 
rsdAllocationGenerateMipmaps(const Context * rsc,const Allocation * alloc)1197 void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
1198     if(!alloc->mHal.drvState.lod[0].mallocPtr) {
1199         return;
1200     }
1201     uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
1202     for (uint32_t face = 0; face < numFaces; face ++) {
1203         for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() -1); lod++) {
1204             switch (alloc->getType()->getElement()->getSizeBits()) {
1205             case 32:
1206                 mip8888(alloc, lod, (RsAllocationCubemapFace)face);
1207                 break;
1208             case 16:
1209                 mip565(alloc, lod, (RsAllocationCubemapFace)face);
1210                 break;
1211             case 8:
1212                 mip8(alloc, lod, (RsAllocationCubemapFace)face);
1213                 break;
1214             }
1215         }
1216     }
1217 }
1218 
uint32_t rsdAllocationGrallocBits(const android::renderscript::Context *rsc,
                                  android::renderscript::Allocation *alloc)
{
    // This driver requests no additional gralloc usage bits for any
    // allocation.
    return 0;
}
1224 
void rsdAllocationUpdateCachedObject(const Context *rsc,
                                     const Allocation *alloc,
                                     rs_allocation *obj)
{
    // Point the script-visible rs_allocation handle at this Allocation.
    obj->p = alloc;
#ifdef __LP64__
    // On 64-bit builds the handle has extra pointer-sized fields; clear them
    // so the cached object never carries stale values.
    obj->unused1 = nullptr;
    obj->unused2 = nullptr;
    obj->unused3 = nullptr;
#endif
}
1236