/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrDrawOpAtlas.h"

#include "GrContext.h"
#include "GrOpFlushState.h"
#include "GrRectanizer.h"
#include "GrResourceProvider.h"
#include "GrTracing.h"

std::unique_ptr<GrDrawOpAtlas> GrDrawOpAtlas::Make(GrContext* ctx, GrPixelConfig config,
                                                   int width, int height,
                                                   int numPlotsX, int numPlotsY,
                                                   GrDrawOpAtlas::EvictionFunc func,
                                                   void* data) {
    GrSurfaceDesc desc;
    desc.fFlags = kNone_GrSurfaceFlags;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = config;

    // We don't want to flush the context so we claim we're in the middle of flushing so as to
    // guarantee we do not receive a texture with pending IO
    // TODO: Determine how to avoid having to do this. (https://bug.skia.org/4156)
    static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
    sk_sp<GrTexture> texture(ctx->resourceProvider()->createApproxTexture(desc, kFlags));
    if (!texture) {
        return nullptr;
    }

    // MDB TODO: for now, wrap an instantiated texture. Having the deferred instantiation
    // possess the correct properties (e.g., no pendingIO) should fall out of the system but
    // should receive special attention.
    // Note: When switching over to the deferred proxy, use the kExact flag to create
    // the atlas and assert that the width & height are powers of 2.
    sk_sp<GrTextureProxy> proxy = GrSurfaceProxy::MakeWrapped(std::move(texture));
    if (!proxy) {
        return nullptr;
    }

    std::unique_ptr<GrDrawOpAtlas> atlas(
            new GrDrawOpAtlas(ctx, std::move(proxy), numPlotsX, numPlotsY));
    atlas->registerEvictionCallback(func, data);
    return atlas;
}
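
// Illustrative only: a client cache might create an atlas along these lines. The pixel config,
// dimensions, plot counts, and eviction callback below are assumptions for this sketch, not
// values prescribed by this file.
//
//     void MyEvict(GrDrawOpAtlas::AtlasID id, void* data) {
//         // drop any cached entries that still reference 'id'
//     }
//     ...
//     std::unique_ptr<GrDrawOpAtlas> atlas = GrDrawOpAtlas::Make(
//             context, kAlpha_8_GrPixelConfig, 1024, 1024, 4, 4, &MyEvict, myCachePtr);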

////////////////////////////////////////////////////////////////////////////////

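// A Plot is one fixed-size cell of the atlas. It owns a GrRectanizer for packing sub-images, a
// lazily allocated CPU-side staging buffer (fData) mirroring its texels, and a dirty rect that
// records the region still awaiting upload. fID packs the plot index with its generation so that
// AtlasIDs handed out before the plot was recycled can be recognized as stale.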
GrDrawOpAtlas::Plot::Plot(int index, uint64_t genID, int offX, int offY, int width, int height,
                          GrPixelConfig config)
        : fLastUpload(GrDrawOpUploadToken::AlreadyFlushedToken())
        , fLastUse(GrDrawOpUploadToken::AlreadyFlushedToken())
        , fIndex(index)
        , fGenID(genID)
        , fID(CreateId(fIndex, fGenID))
        , fData(nullptr)
        , fWidth(width)
        , fHeight(height)
        , fX(offX)
        , fY(offY)
        , fRects(nullptr)
        , fOffset(SkIPoint16::Make(fX * fWidth, fY * fHeight))
        , fConfig(config)
        , fBytesPerPixel(GrBytesPerPixel(config))
#ifdef SK_DEBUG
        , fDirty(false)
#endif
{
    fDirtyRect.setEmpty();
}

GrDrawOpAtlas::Plot::~Plot() {
    sk_free(fData);
    delete fRects;
}

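// Copies 'image' into the plot's CPU staging buffer and returns the sub-image's position in
// atlas coordinates via 'loc'. Nothing is sent to the GPU here; the touched region is merely
// added to fDirtyRect and flushed later by uploadToTexture().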
bool GrDrawOpAtlas::Plot::addSubImage(int width, int height, const void* image, SkIPoint16* loc) {
    SkASSERT(width <= fWidth && height <= fHeight);

    if (!fRects) {
        fRects = GrRectanizer::Factory(fWidth, fHeight);
    }

    if (!fRects->addRect(width, height, loc)) {
        return false;
    }

    if (!fData) {
        fData = reinterpret_cast<unsigned char*>(sk_calloc_throw(fBytesPerPixel * fWidth *
                                                                 fHeight));
    }
    size_t rowBytes = width * fBytesPerPixel;
    const unsigned char* imagePtr = (const unsigned char*)image;
    // point ourselves at the right starting spot
    unsigned char* dataPtr = fData;
    dataPtr += fBytesPerPixel * fWidth * loc->fY;
    dataPtr += fBytesPerPixel * loc->fX;
    // copy into the data buffer, swizzling as we go if this is ARGB data
    if (4 == fBytesPerPixel && kSkia8888_GrPixelConfig == kBGRA_8888_GrPixelConfig) {
        for (int i = 0; i < height; ++i) {
            SkOpts::RGBA_to_BGRA(reinterpret_cast<uint32_t*>(dataPtr), imagePtr, width);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    } else {
        for (int i = 0; i < height; ++i) {
            memcpy(dataPtr, imagePtr, rowBytes);
            dataPtr += fBytesPerPixel * fWidth;
            imagePtr += rowBytes;
        }
    }

    fDirtyRect.join(loc->fX, loc->fY, loc->fX + width, loc->fY + height);

    loc->fX += fOffset.fX;
    loc->fY += fOffset.fY;
    SkDEBUGCODE(fDirty = true;)

    return true;
}

void GrDrawOpAtlas::Plot::uploadToTexture(GrDrawOp::WritePixelsFn& writePixels,
                                          GrTexture* texture) {
    // We should only be issuing uploads if we are in fact dirty
    SkASSERT(fDirty && fData && texture);
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("skia.gpu"), "GrDrawOpAtlas::Plot::uploadToTexture");
    size_t rowBytes = fBytesPerPixel * fWidth;
    const unsigned char* dataPtr = fData;
    dataPtr += rowBytes * fDirtyRect.fTop;
    dataPtr += fBytesPerPixel * fDirtyRect.fLeft;
    writePixels(texture, fOffset.fX + fDirtyRect.fLeft, fOffset.fY + fDirtyRect.fTop,
                fDirtyRect.width(), fDirtyRect.height(), fConfig, dataPtr, rowBytes);
    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

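// Clears the plot for reuse: the rectanizer and staging buffer are reset, and the generation is
// bumped so AtlasIDs minted for the old contents no longer match this plot.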
void GrDrawOpAtlas::Plot::resetRects() {
    if (fRects) {
        fRects->reset();
    }

    fGenID++;
    fID = CreateId(fIndex, fGenID);

    // zero out the plot
    if (fData) {
        sk_bzero(fData, fBytesPerPixel * fWidth * fHeight);
    }

    fDirtyRect.setEmpty();
    SkDEBUGCODE(fDirty = false;)
}

///////////////////////////////////////////////////////////////////////////////

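// The backing proxy is carved into a numPlotsX x numPlotsY grid of equally sized Plots. The plots
// also serve as nodes of the intrusive LRU list (fPlotList): addToAtlas() tries the most recently
// used plots first and evicts from the tail.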
GrDrawOpAtlas::GrDrawOpAtlas(GrContext* context, sk_sp<GrTextureProxy> proxy,
                             int numPlotsX, int numPlotsY)
        : fContext(context)
        , fProxy(std::move(proxy))
        , fAtlasGeneration(kInvalidAtlasGeneration + 1) {
    fPlotWidth = fProxy->width() / numPlotsX;
    fPlotHeight = fProxy->height() / numPlotsY;
    SkASSERT(numPlotsX * numPlotsY <= BulkUseTokenUpdater::kMaxPlots);
    SkASSERT(fPlotWidth * numPlotsX == fProxy->width());
    SkASSERT(fPlotHeight * numPlotsY == fProxy->height());

    SkDEBUGCODE(fNumPlots = numPlotsX * numPlotsY;)

    // We currently do not support compressed atlases...
    SkASSERT(!GrPixelConfigIsCompressed(fProxy->desc().fConfig));

    // set up allocated plots
    fPlotArray.reset(new sk_sp<Plot>[ numPlotsX * numPlotsY ]);

    sk_sp<Plot>* currPlot = fPlotArray.get();
    for (int y = numPlotsY - 1, r = 0; y >= 0; --y, ++r) {
        for (int x = numPlotsX - 1, c = 0; x >= 0; --x, ++c) {
            uint32_t index = r * numPlotsX + c;
            currPlot->reset(
                    new Plot(index, 1, x, y, fPlotWidth, fPlotHeight, fProxy->desc().fConfig));

            // build LRU list
            fPlotList.addToHead(currPlot->get());
            ++currPlot;
        }
    }
}

void GrDrawOpAtlas::processEviction(AtlasID id) {
    for (int i = 0; i < fEvictionCallbacks.count(); i++) {
        (*fEvictionCallbacks[i].fFunc)(id, fEvictionCallbacks[i].fData);
    }
}

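// Marks 'plot' most recently used and, if its previous upload has already been flushed, schedules
// a fresh ASAP upload of the plot's dirty region; otherwise the new data simply piggybacks on the
// upload that is already pending.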
inline bool GrDrawOpAtlas::updatePlot(GrDrawOp::Target* target, AtlasID* id, Plot* plot) {
    this->makeMRU(plot);

    // If our most recent upload has already occurred then we have to insert a new
    // upload. Otherwise, we already have a scheduled upload that hasn't yet occurred.
    // This new update will piggyback on that previously scheduled update.
    if (target->hasDrawBeenFlushed(plot->lastUploadToken())) {
        // With C++14 we could move the sk_sp into the lambda to only ref once.
        sk_sp<Plot> plotsp(SkRef(plot));

        // MDB TODO: this is currently fine since the atlas' proxy is always pre-instantiated.
        // Once it is deferred more care must be taken upon instantiation failure.
        GrTexture* texture = fProxy->instantiate(fContext->resourceProvider());
        if (!texture) {
            return false;
        }

        GrDrawOpUploadToken lastUploadToken = target->addAsapUpload(
            [plotsp, texture] (GrDrawOp::WritePixelsFn& writePixels) {
                plotsp->uploadToTexture(writePixels, texture);
            }
        );
        plot->setLastUploadToken(lastUploadToken);
    }
    *id = plot->id();
    return true;
}

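// Adds a width x height sub-image to the atlas, trying three strategies in order:
//   1) pack into any existing plot, visiting plots in most recently used order;
//   2) otherwise recycle the least recently used plot, provided every draw referencing it has
//      already been flushed;
//   3) otherwise, as long as the LRU plot is not referenced by the draw currently being prepared,
//      replace it with a clone and schedule an inline upload that lands after the already
//      recorded draws.
// If none of these apply the call fails so the op can issue its pending draw and then retry.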
bool GrDrawOpAtlas::addToAtlas(AtlasID* id, GrDrawOp::Target* target, int width, int height,
                               const void* image, SkIPoint16* loc) {
    // We should already have a texture, TODO clean this up
    SkASSERT(fProxy);
    if (width > fPlotWidth || height > fPlotHeight) {
        return false;
    }

    // now look through all allocated plots for one we can share, in Most Recently Refed order
    PlotList::Iter plotIter;
    plotIter.init(fPlotList, PlotList::Iter::kHead_IterStart);
    Plot* plot;
    while ((plot = plotIter.get())) {
        SkASSERT(GrBytesPerPixel(fProxy->desc().fConfig) == plot->bpp());
        if (plot->addSubImage(width, height, image, loc)) {
            return this->updatePlot(target, id, plot);
        }
        plotIter.next();
    }

    // If the above fails, then see if the least recently refed plot has already been flushed to
    // the gpu
    plot = fPlotList.tail();
    SkASSERT(plot);
    if (target->hasDrawBeenFlushed(plot->lastUseToken())) {
        this->processEviction(plot->id());
        plot->resetRects();
        SkASSERT(GrBytesPerPixel(fProxy->desc().fConfig) == plot->bpp());
        SkDEBUGCODE(bool verify = )plot->addSubImage(width, height, image, loc);
        SkASSERT(verify);
        if (!this->updatePlot(target, id, plot)) {
            return false;
        }

        fAtlasGeneration++;
        return true;
    }

    // If this plot has been used in a draw that is currently being prepared by an op, then we have
    // to fail. This gives the op a chance to enqueue the draw, and call back into this function.
    // When that draw is enqueued, the draw token advances, and the subsequent call will continue
    // past this branch and prepare an inline upload that will occur after the enqueued draw which
    // references the plot's pre-upload content.
    if (plot->lastUseToken() == target->nextDrawToken()) {
        return false;
    }

    this->processEviction(plot->id());
    fPlotList.remove(plot);
    sk_sp<Plot>& newPlot = fPlotArray[plot->index()];
    newPlot.reset(plot->clone());

    fPlotList.addToHead(newPlot.get());
    SkASSERT(GrBytesPerPixel(fProxy->desc().fConfig) == newPlot->bpp());
    SkDEBUGCODE(bool verify = )newPlot->addSubImage(width, height, image, loc);
    SkASSERT(verify);

    // Note that this plot will be uploaded inline with the draws whereas the
    // one it displaced most likely was uploaded asap.
    // With C++14 we could move the sk_sp into the lambda to only ref once.
    sk_sp<Plot> plotsp(SkRef(newPlot.get()));
    // MDB TODO: this is currently fine since the atlas' proxy is always pre-instantiated.
    // Once it is deferred more care must be taken upon instantiation failure.
    GrTexture* texture = fProxy->instantiate(fContext->resourceProvider());
    if (!texture) {
        return false;
    }

    GrDrawOpUploadToken lastUploadToken = target->addInlineUpload(
        [plotsp, texture] (GrDrawOp::WritePixelsFn& writePixels) {
            plotsp->uploadToTexture(writePixels, texture);
        }
    );
    newPlot->setLastUploadToken(lastUploadToken);

    *id = newPlot->id();

    fAtlasGeneration++;
    return true;
}