1 /* libs/pixelflinger/scanline.cpp
2 **
3 ** Copyright 2006-2011, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18
19 #define LOG_TAG "pixelflinger"
20
21 #include <assert.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25
26 #include <cutils/memory.h>
27 #include <cutils/log.h>
28
29 #ifdef __arm__
30 #include <machine/cpu-features.h>
31 #endif
32
33 #include "buffer.h"
34 #include "scanline.h"
35
36 #include "codeflinger/CodeCache.h"
37 #include "codeflinger/GGLAssembler.h"
38 #if defined(__arm__)
39 #include "codeflinger/ARMAssembler.h"
40 #elif defined(__aarch64__)
41 #include "codeflinger/Arm64Assembler.h"
42 #elif defined(__mips__) && !defined(__LP64__) && __mips_isa_rev < 6
43 #include "codeflinger/MIPSAssembler.h"
44 #endif
45 //#include "codeflinger/ARMAssemblerOptimizer.h"
46
47 // ----------------------------------------------------------------------------
48
49 #define ANDROID_CODEGEN_GENERIC 0 // force generic pixel pipeline
50 #define ANDROID_CODEGEN_C 1 // hand-written C, fallback generic
51 #define ANDROID_CODEGEN_ASM 2 // hand-written asm, fallback generic
52 #define ANDROID_CODEGEN_GENERATED 3 // hand-written asm, fallback codegen
53
54 #ifdef NDEBUG
55 # define ANDROID_RELEASE
56 # define ANDROID_CODEGEN ANDROID_CODEGEN_GENERATED
57 #else
58 # define ANDROID_DEBUG
59 # define ANDROID_CODEGEN ANDROID_CODEGEN_GENERATED
60 #endif
61
62 #if defined(__arm__) || (defined(__mips__) && !defined(__LP64__) && __mips_isa_rev < 6) || defined(__aarch64__)
63 # define ANDROID_ARM_CODEGEN 1
64 #else
65 # define ANDROID_ARM_CODEGEN 0
66 #endif
67
68 #define DEBUG__CODEGEN_ONLY 0
69
70 /* Set to 1 to dump to the log the states that need a new
71 * code-generated scanline callback, i.e. those that don't
72 * have a corresponding shortcut function.
73 */
74 #define DEBUG_NEEDS 0
75
76 #if defined( __mips__) && !defined(__LP64__) && __mips_isa_rev < 6
77 #define ASSEMBLY_SCRATCH_SIZE 4096
78 #elif defined(__aarch64__)
79 #define ASSEMBLY_SCRATCH_SIZE 8192
80 #else
81 #define ASSEMBLY_SCRATCH_SIZE 2048
82 #endif
83
84 // ----------------------------------------------------------------------------
85 namespace android {
86 // ----------------------------------------------------------------------------
87
88 static void init_y(context_t*, int32_t);
89 static void init_y_noop(context_t*, int32_t);
90 static void init_y_packed(context_t*, int32_t);
91 static void init_y_error(context_t*, int32_t);
92
93 static void step_y__generic(context_t* c);
94 static void step_y__nop(context_t*);
95 static void step_y__smooth(context_t* c);
96 static void step_y__tmu(context_t* c);
97 static void step_y__w(context_t* c);
98
99 static void scanline(context_t* c);
100 static void scanline_perspective(context_t* c);
101 static void scanline_perspective_single(context_t* c);
102 static void scanline_t32cb16blend(context_t* c);
103 static void scanline_t32cb16blend_dither(context_t* c);
104 static void scanline_t32cb16blend_srca(context_t* c);
105 static void scanline_t32cb16blend_clamp(context_t* c);
106 static void scanline_t32cb16blend_clamp_dither(context_t* c);
107 static void scanline_t32cb16blend_clamp_mod(context_t* c);
108 static void scanline_x32cb16blend_clamp_mod(context_t* c);
109 static void scanline_t32cb16blend_clamp_mod_dither(context_t* c);
110 static void scanline_x32cb16blend_clamp_mod_dither(context_t* c);
111 static void scanline_t32cb16(context_t* c);
112 static void scanline_t32cb16_dither(context_t* c);
113 static void scanline_t32cb16_clamp(context_t* c);
114 static void scanline_t32cb16_clamp_dither(context_t* c);
115 static void scanline_col32cb16blend(context_t* c);
116 static void scanline_t16cb16_clamp(context_t* c);
117 static void scanline_t16cb16blend_clamp_mod(context_t* c);
118 static void scanline_memcpy(context_t* c);
119 static void scanline_memset8(context_t* c);
120 static void scanline_memset16(context_t* c);
121 static void scanline_memset32(context_t* c);
122 static void scanline_noop(context_t* c);
123 static void scanline_set(context_t* c);
124 static void scanline_clear(context_t* c);
125
126 static void rect_generic(context_t* c, size_t yc);
127 static void rect_memcpy(context_t* c, size_t yc);
128
129 #if defined( __arm__)
130 extern "C" void scanline_t32cb16blend_arm(uint16_t*, uint32_t*, size_t);
131 extern "C" void scanline_t32cb16_arm(uint16_t *dst, uint32_t *src, size_t ct);
132 extern "C" void scanline_col32cb16blend_neon(uint16_t *dst, uint32_t *col, size_t ct);
133 extern "C" void scanline_col32cb16blend_arm(uint16_t *dst, uint32_t col, size_t ct);
134 #elif defined(__aarch64__)
135 extern "C" void scanline_t32cb16blend_arm64(uint16_t*, uint32_t*, size_t);
136 extern "C" void scanline_col32cb16blend_arm64(uint16_t *dst, uint32_t col, size_t ct);
137 #elif defined(__mips__) && !defined(__LP64__) && __mips_isa_rev < 6
138 extern "C" void scanline_t32cb16blend_mips(uint16_t*, uint32_t*, size_t);
139 #endif
140
141 // ----------------------------------------------------------------------------
142
143 static inline uint16_t convertAbgr8888ToRgb565(uint32_t pix)
144 {
145 return uint16_t( ((pix << 8) & 0xf800) |
146 ((pix >> 5) & 0x07e0) |
147 ((pix >> 19) & 0x001f) );
148 }
149
150 struct shortcut_t {
151 needs_filter_t filter;
152 const char* desc;
153 void (*scanline)(context_t*);
154 void (*init_y)(context_t*, int32_t);
155 };
156
157 // Keep in sync with needs
158
159 /* To understand the values here, have a look at:
160 * system/core/include/private/pixelflinger/ggl_context.h
161 *
162 * Especially the lines defining and using GGL_RESERVE_NEEDS
163 *
164 * Quick reminders:
165 * - the last nibble of the first value is the destination buffer format.
166 * - the last nibble of the third value is the source texture format
167 * - formats: 4=rgb565 1=abgr8888 2=xbgr8888
168 *
169 * In the descriptions below:
170 *
171 * SRC means we copy the source pixels to the destination
172 *
173 * SRC_OVER means we blend the source pixels to the destination
174 * with dstFactor = 1-srcA, srcFactor=1 (premultiplied source).
175 * This mode is otherwise called 'blend'.
176 *
177 * SRCA_OVER means we blend the source pixels to the destination
178 * with dstFactor=1-srcA, srcFactor=srcA (non-premultiplied source).
179 * This mode is otherwise called 'blend_srca'
180 *
181 * clamp means we fetch source pixels from a texture with u/v clamping
182 *
183 * mod means the source pixels are modulated (multiplied) by the
184 * a/r/g/b of the current context's color. Typically used for
185 * fade-in / fade-out.
186 *
187 * dither means we dither 32 bit values to 16 bits
188 */
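/* Example: in the first entry below, the first value 0x03515104 ends in 4
 * (rgb565 destination) and the third value 0x00000A01 ends in 1 (abgr8888
 * texture), matching its "565 fb, 8888 tx, blend SRC_OVER" description.
 */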
189 static shortcut_t shortcuts[] = {
190 { { { 0x03515104, 0x00000077, { 0x00000A01, 0x00000000 } },
191 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
192 "565 fb, 8888 tx, blend SRC_OVER", scanline_t32cb16blend, init_y_noop },
193 { { { 0x03010104, 0x00000077, { 0x00000A01, 0x00000000 } },
194 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
195 "565 fb, 8888 tx, SRC", scanline_t32cb16, init_y_noop },
196 /* same as first entry, but with dithering */
197 { { { 0x03515104, 0x00000177, { 0x00000A01, 0x00000000 } },
198 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
199 "565 fb, 8888 tx, blend SRC_OVER dither", scanline_t32cb16blend_dither, init_y_noop },
200 /* same as second entry, but with dithering */
201 { { { 0x03010104, 0x00000177, { 0x00000A01, 0x00000000 } },
202 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
203 "565 fb, 8888 tx, SRC dither", scanline_t32cb16_dither, init_y_noop },
204 /* this is used during the boot animation - CHEAT: ignore dithering */
205 { { { 0x03545404, 0x00000077, { 0x00000A01, 0x00000000 } },
206 { 0xFFFFFFFF, 0xFFFFFEFF, { 0xFFFFFFFF, 0x0000003F } } },
207 "565 fb, 8888 tx, blend dst:ONE_MINUS_SRCA src:SRCA", scanline_t32cb16blend_srca, init_y_noop },
208 /* special case for arbitrary texture coordinates (think scaling) */
209 { { { 0x03515104, 0x00000077, { 0x00000001, 0x00000000 } },
210 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
211 "565 fb, 8888 tx, SRC_OVER clamp", scanline_t32cb16blend_clamp, init_y },
212 { { { 0x03515104, 0x00000177, { 0x00000001, 0x00000000 } },
213 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
214 "565 fb, 8888 tx, SRC_OVER clamp dither", scanline_t32cb16blend_clamp_dither, init_y },
215 /* another case used during emulation */
216 { { { 0x03515104, 0x00000077, { 0x00001001, 0x00000000 } },
217 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
218 "565 fb, 8888 tx, SRC_OVER clamp modulate", scanline_t32cb16blend_clamp_mod, init_y },
219 /* and this */
220 { { { 0x03515104, 0x00000077, { 0x00001002, 0x00000000 } },
221 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
222 "565 fb, x888 tx, SRC_OVER clamp modulate", scanline_x32cb16blend_clamp_mod, init_y },
223 { { { 0x03515104, 0x00000177, { 0x00001001, 0x00000000 } },
224 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
225 "565 fb, 8888 tx, SRC_OVER clamp modulate dither", scanline_t32cb16blend_clamp_mod_dither, init_y },
226 { { { 0x03515104, 0x00000177, { 0x00001002, 0x00000000 } },
227 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
228 "565 fb, x888 tx, SRC_OVER clamp modulate dither", scanline_x32cb16blend_clamp_mod_dither, init_y },
229 { { { 0x03010104, 0x00000077, { 0x00000001, 0x00000000 } },
230 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
231 "565 fb, 8888 tx, SRC clamp", scanline_t32cb16_clamp, init_y },
232 { { { 0x03010104, 0x00000077, { 0x00000002, 0x00000000 } },
233 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
234 "565 fb, x888 tx, SRC clamp", scanline_t32cb16_clamp, init_y },
235 { { { 0x03010104, 0x00000177, { 0x00000001, 0x00000000 } },
236 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
237 "565 fb, 8888 tx, SRC clamp dither", scanline_t32cb16_clamp_dither, init_y },
238 { { { 0x03010104, 0x00000177, { 0x00000002, 0x00000000 } },
239 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
240 "565 fb, x888 tx, SRC clamp dither", scanline_t32cb16_clamp_dither, init_y },
241 { { { 0x03010104, 0x00000077, { 0x00000004, 0x00000000 } },
242 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
243 "565 fb, 565 tx, SRC clamp", scanline_t16cb16_clamp, init_y },
244 { { { 0x03515104, 0x00000077, { 0x00001004, 0x00000000 } },
245 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0x0000003F } } },
246 "565 fb, 565 tx, SRC_OVER clamp", scanline_t16cb16blend_clamp_mod, init_y },
247 { { { 0x03515104, 0x00000077, { 0x00000000, 0x00000000 } },
248 { 0xFFFFFFFF, 0xFFFFFFFF, { 0xFFFFFFFF, 0xFFFFFFFF } } },
249 "565 fb, 8888 fixed color", scanline_col32cb16blend, init_y_packed },
250 { { { 0x00000000, 0x00000000, { 0x00000000, 0x00000000 } },
251 { 0x00000000, 0x00000007, { 0x00000000, 0x00000000 } } },
252 "(nop) alpha test", scanline_noop, init_y_noop },
253 { { { 0x00000000, 0x00000000, { 0x00000000, 0x00000000 } },
254 { 0x00000000, 0x00000070, { 0x00000000, 0x00000000 } } },
255 "(nop) depth test", scanline_noop, init_y_noop },
256 { { { 0x05000000, 0x00000000, { 0x00000000, 0x00000000 } },
257 { 0x0F000000, 0x00000080, { 0x00000000, 0x00000000 } } },
258 "(nop) logic_op", scanline_noop, init_y_noop },
259 { { { 0xF0000000, 0x00000000, { 0x00000000, 0x00000000 } },
260 { 0xF0000000, 0x00000080, { 0x00000000, 0x00000000 } } },
261 "(nop) color mask", scanline_noop, init_y_noop },
262 { { { 0x0F000000, 0x00000077, { 0x00000000, 0x00000000 } },
263 { 0xFF000000, 0x000000F7, { 0x00000000, 0x00000000 } } },
264 "(set) logic_op", scanline_set, init_y_noop },
265 { { { 0x00000000, 0x00000077, { 0x00000000, 0x00000000 } },
266 { 0xFF000000, 0x000000F7, { 0x00000000, 0x00000000 } } },
267 "(clear) logic_op", scanline_clear, init_y_noop },
268 { { { 0x03000000, 0x00000077, { 0x00000000, 0x00000000 } },
269 { 0xFFFFFF00, 0x000000F7, { 0x00000000, 0x00000000 } } },
270 "(clear) blending 0/0", scanline_clear, init_y_noop },
271 { { { 0x00000000, 0x00000000, { 0x00000000, 0x00000000 } },
272 { 0x0000003F, 0x00000000, { 0x00000000, 0x00000000 } } },
273 "(error) invalid color-buffer format", scanline_noop, init_y_error },
274 };
275 static const needs_filter_t noblend1to1 = {
276 // (disregard dithering, see below)
277 { 0x03010100, 0x00000077, { 0x00000A00, 0x00000000 } },
278 { 0xFFFFFFC0, 0xFFFFFEFF, { 0xFFFFFFC0, 0x0000003F } }
279 };
280 static const needs_filter_t fill16noblend = {
281 { 0x03010100, 0x00000077, { 0x00000000, 0x00000000 } },
282 { 0xFFFFFFC0, 0xFFFFFFFF, { 0x0000003F, 0x0000003F } }
283 };
284
285 // ----------------------------------------------------------------------------
286
287 #if ANDROID_ARM_CODEGEN
288
289 #if defined(__mips__) && !defined(__LP64__) && __mips_isa_rev < 6
290 static CodeCache gCodeCache(32 * 1024);
291 #elif defined(__aarch64__)
292 static CodeCache gCodeCache(48 * 1024);
293 #else
294 static CodeCache gCodeCache(12 * 1024);
295 #endif
296
297 class ScanlineAssembly : public Assembly {
298 AssemblyKey<needs_t> mKey;
299 public:
300 ScanlineAssembly(needs_t needs, size_t size)
301 : Assembly(size), mKey(needs) { }
302 const AssemblyKey<needs_t>& key() const { return mKey; }
303 };
304 #endif
305
306 // ----------------------------------------------------------------------------
307
308 void ggl_init_scanline(context_t* c)
309 {
310 c->init_y = init_y;
311 c->step_y = step_y__generic;
312 c->scanline = scanline;
313 }
314
315 void ggl_uninit_scanline(context_t* c)
316 {
317 if (c->state.buffers.coverage)
318 free(c->state.buffers.coverage);
319 #if ANDROID_ARM_CODEGEN
320 if (c->scanline_as)
321 c->scanline_as->decStrong(c);
322 #endif
323 }
324
325 // ----------------------------------------------------------------------------
326
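/* Select the scanline implementation for the current state: try the
 * copy/fill special cases first, then the hand-written shortcuts above,
 * and finally fall back to generated (or generic) code.
 */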
327 static void pick_scanline(context_t* c)
328 {
329 #if (!defined(DEBUG__CODEGEN_ONLY) || (DEBUG__CODEGEN_ONLY == 0))
330
331 #if ANDROID_CODEGEN == ANDROID_CODEGEN_GENERIC
332 c->init_y = init_y;
333 c->step_y = step_y__generic;
334 c->scanline = scanline;
335 return;
336 #endif
337
338 //printf("*** needs [%08lx:%08lx:%08lx:%08lx]\n",
339 // c->state.needs.n, c->state.needs.p,
340 // c->state.needs.t[0], c->state.needs.t[1]);
341
342 // first handle the special case that we cannot test with a filter
343 const uint32_t cb_format = GGL_READ_NEEDS(CB_FORMAT, c->state.needs.n);
344 if (GGL_READ_NEEDS(T_FORMAT, c->state.needs.t[0]) == cb_format) {
345 if (c->state.needs.match(noblend1to1)) {
346 // this will match regardless of dithering state: since src and
347 // dest have the same format anyway, there is no dithering
348 // to be done.
349 const GGLFormat* f =
350 &(c->formats[GGL_READ_NEEDS(T_FORMAT, c->state.needs.t[0])]);
351 if ((f->components == GGL_RGB) ||
352 (f->components == GGL_RGBA) ||
353 (f->components == GGL_LUMINANCE) ||
354 (f->components == GGL_LUMINANCE_ALPHA))
355 {
356 // format must have all of RGB components
357 // (so the current color doesn't show through)
358 c->scanline = scanline_memcpy;
359 c->init_y = init_y_noop;
360 return;
361 }
362 }
363 }
364
365 if (c->state.needs.match(fill16noblend)) {
366 c->init_y = init_y_packed;
367 switch (c->formats[cb_format].size) {
368 case 1: c->scanline = scanline_memset8; return;
369 case 2: c->scanline = scanline_memset16; return;
370 case 4: c->scanline = scanline_memset32; return;
371 }
372 }
373
374 const int numFilters = sizeof(shortcuts)/sizeof(shortcut_t);
375 for (int i=0 ; i<numFilters ; i++) {
376 if (c->state.needs.match(shortcuts[i].filter)) {
377 c->scanline = shortcuts[i].scanline;
378 c->init_y = shortcuts[i].init_y;
379 return;
380 }
381 }
382
383 #if DEBUG_NEEDS
384 ALOGI("Needs: n=0x%08x p=0x%08x t0=0x%08x t1=0x%08x",
385 c->state.needs.n, c->state.needs.p,
386 c->state.needs.t[0], c->state.needs.t[1]);
387 #endif
388
389 #endif // DEBUG__CODEGEN_ONLY
390
391 c->init_y = init_y;
392 c->step_y = step_y__generic;
393
394 #if ANDROID_ARM_CODEGEN
395 // we're going to have to generate some code...
396 // here, generate code for our pixel pipeline
397 const AssemblyKey<needs_t> key(c->state.needs);
398 sp<Assembly> assembly = gCodeCache.lookup(key);
399 if (assembly == 0) {
400 // create a new assembly region
401 sp<ScanlineAssembly> a = new ScanlineAssembly(c->state.needs,
402 ASSEMBLY_SCRATCH_SIZE);
403 // initialize our assembler
404 #if defined(__arm__)
405 GGLAssembler assembler( new ARMAssembler(a) );
406 //GGLAssembler assembler(
407 // new ARMAssemblerOptimizer(new ARMAssembler(a)) );
408 #endif
409 #if defined(__mips__)
410 GGLAssembler assembler( new ArmToMipsAssembler(a) );
411 #elif defined(__aarch64__)
412 GGLAssembler assembler( new ArmToArm64Assembler(a) );
413 #endif
414 // generate the scanline code for the given needs
415 bool err = assembler.scanline(c->state.needs, c) != 0;
416 if (ggl_likely(!err)) {
417 // finally, cache this assembly
418 err = gCodeCache.cache(a->key(), a) < 0;
419 }
420 if (ggl_unlikely(err)) {
421 ALOGE("error generating or caching assembly. Reverting to NOP.");
422 c->scanline = scanline_noop;
423 c->init_y = init_y_noop;
424 c->step_y = step_y__nop;
425 return;
426 }
427 assembly = a;
428 }
429
430 // release the previous assembly
431 if (c->scanline_as) {
432 c->scanline_as->decStrong(c);
433 }
434
435 //ALOGI("using generated pixel-pipeline");
436 c->scanline_as = assembly.get();
437 c->scanline_as->incStrong(c); // hold on to assembly
438 c->scanline = (void(*)(context_t* c))assembly->base();
439 #else
440 // ALOGW("using generic (slow) pixel-pipeline");
441 c->scanline = scanline;
442 #endif
443 }
444
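/* Public entry point: picks a scanline as above, then interposes the
 * perspective-correct wrapper when both W and texturing are enabled.
 */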
445 void ggl_pick_scanline(context_t* c)
446 {
447 pick_scanline(c);
448 if ((c->state.enables & GGL_ENABLE_W) &&
449 (c->state.enables & GGL_ENABLE_TMUS))
450 {
451 c->span = c->scanline;
452 c->scanline = scanline_perspective;
453 if (!(c->state.enabled_tmu & (c->state.enabled_tmu - 1))) {
454 // only one TMU enabled
455 c->scanline = scanline_perspective_single;
456 }
457 }
458 }
459
460 // ----------------------------------------------------------------------------
461
462 static void blending(context_t* c, pixel_t* fragment, pixel_t* fb);
463 static void blend_factor(context_t* c, pixel_t* r, uint32_t factor,
464 const pixel_t* src, const pixel_t* dst);
465 static void rescale(uint32_t& u, uint8_t& su, uint32_t& v, uint8_t& sv);
466
467 #if ANDROID_ARM_CODEGEN && (ANDROID_CODEGEN == ANDROID_CODEGEN_GENERATED)
468
469 // no need to compile the generic-pipeline, it can't be reached
470 void scanline(context_t*)
471 {
472 }
473
474 #else
475
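/* Bring two color components to a common bit depth by expanding the one
 * with fewer bits; su and sv are the bit sizes of u and v and are updated
 * to the common size.
 */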
476 void rescale(uint32_t& u, uint8_t& su, uint32_t& v, uint8_t& sv)
477 {
478 if (su && sv) {
479 if (su > sv) {
480 v = ggl_expand(v, sv, su);
481 sv = su;
482 } else if (su < sv) {
483 u = ggl_expand(u, su, sv);
484 su = sv;
485 }
486 }
487 }
488
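/* Channel layout used in pixel_t throughout this file: c[0]/s[0] is alpha,
 * c[1] red, c[2] green, c[3] blue, with s[] holding each channel's bit size.
 */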
489 void blending(context_t* c, pixel_t* fragment, pixel_t* fb)
490 {
491 rescale(fragment->c[0], fragment->s[0], fb->c[0], fb->s[0]);
492 rescale(fragment->c[1], fragment->s[1], fb->c[1], fb->s[1]);
493 rescale(fragment->c[2], fragment->s[2], fb->c[2], fb->s[2]);
494 rescale(fragment->c[3], fragment->s[3], fb->c[3], fb->s[3]);
495
496 pixel_t sf, df;
497 blend_factor(c, &sf, c->state.blend.src, fragment, fb);
498 blend_factor(c, &df, c->state.blend.dst, fragment, fb);
499
500 fragment->c[1] =
501 gglMulAddx(fragment->c[1], sf.c[1], gglMulx(fb->c[1], df.c[1]));
502 fragment->c[2] =
503 gglMulAddx(fragment->c[2], sf.c[2], gglMulx(fb->c[2], df.c[2]));
504 fragment->c[3] =
505 gglMulAddx(fragment->c[3], sf.c[3], gglMulx(fb->c[3], df.c[3]));
506
507 if (c->state.blend.alpha_separate) {
508 blend_factor(c, &sf, c->state.blend.src_alpha, fragment, fb);
509 blend_factor(c, &df, c->state.blend.dst_alpha, fragment, fb);
510 }
511
512 fragment->c[0] =
513 gglMulAddx(fragment->c[0], sf.c[0], gglMulx(fb->c[0], df.c[0]));
514
515 // clamp to 1.0
516 if (fragment->c[0] >= (1LU<<fragment->s[0]))
517 fragment->c[0] = (1<<fragment->s[0])-1;
518 if (fragment->c[1] >= (1LU<<fragment->s[1]))
519 fragment->c[1] = (1<<fragment->s[1])-1;
520 if (fragment->c[2] >= (1LU<<fragment->s[2]))
521 fragment->c[2] = (1<<fragment->s[2])-1;
522 if (fragment->c[3] >= (1LU<<fragment->s[3]))
523 fragment->c[3] = (1<<fragment->s[3])-1;
524 }
525
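/* Scale a component given on 'size' bits to a 0..FIXED_ONE blend factor:
 * expand or shrink it to 16 bits, then add x>>15 so that a fully saturated
 * component maps exactly to FIXED_ONE (1.0 in 16.16 fixed point).
 */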
526 static inline int blendfactor(uint32_t x, uint32_t size, uint32_t def = 0)
527 {
528 if (!size)
529 return def;
530
531 // scale to 16 bits
532 if (size > 16) {
533 x >>= (size - 16);
534 } else if (size < 16) {
535 x = ggl_expand(x, size, 16);
536 }
537 x += x >> 15;
538 return x;
539 }
540
541 void blend_factor(context_t* /*c*/, pixel_t* r,
542 uint32_t factor, const pixel_t* src, const pixel_t* dst)
543 {
544 switch (factor) {
545 case GGL_ZERO:
546 r->c[1] =
547 r->c[2] =
548 r->c[3] =
549 r->c[0] = 0;
550 break;
551 case GGL_ONE:
552 r->c[1] =
553 r->c[2] =
554 r->c[3] =
555 r->c[0] = FIXED_ONE;
556 break;
557 case GGL_DST_COLOR:
558 r->c[1] = blendfactor(dst->c[1], dst->s[1]);
559 r->c[2] = blendfactor(dst->c[2], dst->s[2]);
560 r->c[3] = blendfactor(dst->c[3], dst->s[3]);
561 r->c[0] = blendfactor(dst->c[0], dst->s[0]);
562 break;
563 case GGL_SRC_COLOR:
564 r->c[1] = blendfactor(src->c[1], src->s[1]);
565 r->c[2] = blendfactor(src->c[2], src->s[2]);
566 r->c[3] = blendfactor(src->c[3], src->s[3]);
567 r->c[0] = blendfactor(src->c[0], src->s[0]);
568 break;
569 case GGL_ONE_MINUS_DST_COLOR:
570 r->c[1] = FIXED_ONE - blendfactor(dst->c[1], dst->s[1]);
571 r->c[2] = FIXED_ONE - blendfactor(dst->c[2], dst->s[2]);
572 r->c[3] = FIXED_ONE - blendfactor(dst->c[3], dst->s[3]);
573 r->c[0] = FIXED_ONE - blendfactor(dst->c[0], dst->s[0]);
574 break;
575 case GGL_ONE_MINUS_SRC_COLOR:
576 r->c[1] = FIXED_ONE - blendfactor(src->c[1], src->s[1]);
577 r->c[2] = FIXED_ONE - blendfactor(src->c[2], src->s[2]);
578 r->c[3] = FIXED_ONE - blendfactor(src->c[3], src->s[3]);
579 r->c[0] = FIXED_ONE - blendfactor(src->c[0], src->s[0]);
580 break;
581 case GGL_SRC_ALPHA:
582 r->c[1] =
583 r->c[2] =
584 r->c[3] =
585 r->c[0] = blendfactor(src->c[0], src->s[0], FIXED_ONE);
586 break;
587 case GGL_ONE_MINUS_SRC_ALPHA:
588 r->c[1] =
589 r->c[2] =
590 r->c[3] =
591 r->c[0] = FIXED_ONE - blendfactor(src->c[0], src->s[0], FIXED_ONE);
592 break;
593 case GGL_DST_ALPHA:
594 r->c[1] =
595 r->c[2] =
596 r->c[3] =
597 r->c[0] = blendfactor(dst->c[0], dst->s[0], FIXED_ONE);
598 break;
599 case GGL_ONE_MINUS_DST_ALPHA:
600 r->c[1] =
601 r->c[2] =
602 r->c[3] =
603 r->c[0] = FIXED_ONE - blendfactor(dst->c[0], dst->s[0], FIXED_ONE);
604 break;
605 case GGL_SRC_ALPHA_SATURATE:
606 // XXX: GGL_SRC_ALPHA_SATURATE
607 break;
608 }
609 }
610
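/* Convert a texture coordinate to a 16.16 texel position for the given
 * wrap mode: GGL_REPEAT scales the (pre-wrapped) coordinate by 'size',
 * GGL_CLAMP clamps to the texel centers (CLAMP_TO_EDGE semantics), and
 * the remaining 1:1 case clamps to [0, size].
 */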
611 static GGLfixed wrapping(int32_t coord, uint32_t size, int tx_wrap)
612 {
613 GGLfixed d;
614 if (tx_wrap == GGL_REPEAT) {
615 d = (uint32_t(coord)>>16) * size;
616 } else if (tx_wrap == GGL_CLAMP) { // CLAMP_TO_EDGE semantics
617 const GGLfixed clamp_min = FIXED_HALF;
618 const GGLfixed clamp_max = (size << 16) - FIXED_HALF;
619 if (coord < clamp_min) coord = clamp_min;
620 if (coord > clamp_max) coord = clamp_max;
621 d = coord;
622 } else { // 1:1
623 const GGLfixed clamp_min = 0;
624 const GGLfixed clamp_max = (size << 16);
625 if (coord < clamp_min) coord = clamp_min;
626 if (coord > clamp_max) coord = clamp_max;
627 d = coord;
628 }
629 return d;
630 }
631
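/* Keep a smooth-shaded color iterator from going negative across the span:
 * if the value at the last pixel would underflow, bias the start so it
 * reaches exactly zero there, then clamp the start itself to zero.
 */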
632 static inline
633 GGLcolor ADJUST_COLOR_ITERATOR(GGLcolor v, GGLcolor dvdx, int len)
634 {
635 const int32_t end = dvdx * (len-1) + v;
636 if (end < 0)
637 v -= end;
638 v &= ~(v>>31);
639 return v;
640 }
641
642 void scanline(context_t* c)
643 {
644 const uint32_t enables = c->state.enables;
645 const int xs = c->iterators.xl;
646 const int x1 = c->iterators.xr;
647 int xc = x1 - xs;
648 const int16_t* covPtr = c->state.buffers.coverage + xs;
649
650 // All iterated values are sampled at the pixel center
651
652 // reset iterators for that scanline...
653 GGLcolor r, g, b, a;
654 iterators_t& ci = c->iterators;
655 if (enables & GGL_ENABLE_SMOOTH) {
656 r = (xs * c->shade.drdx) + ci.ydrdy;
657 g = (xs * c->shade.dgdx) + ci.ydgdy;
658 b = (xs * c->shade.dbdx) + ci.ydbdy;
659 a = (xs * c->shade.dadx) + ci.ydady;
660 r = ADJUST_COLOR_ITERATOR(r, c->shade.drdx, xc);
661 g = ADJUST_COLOR_ITERATOR(g, c->shade.dgdx, xc);
662 b = ADJUST_COLOR_ITERATOR(b, c->shade.dbdx, xc);
663 a = ADJUST_COLOR_ITERATOR(a, c->shade.dadx, xc);
664 } else {
665 r = ci.ydrdy;
666 g = ci.ydgdy;
667 b = ci.ydbdy;
668 a = ci.ydady;
669 }
670
671 // z iterators are 1.31
672 GGLfixed z = (xs * c->shade.dzdx) + ci.ydzdy;
673 GGLfixed f = (xs * c->shade.dfdx) + ci.ydfdy;
674
675 struct {
676 GGLfixed s, t;
677 } tc[GGL_TEXTURE_UNIT_COUNT];
678 if (enables & GGL_ENABLE_TMUS) {
679 for (int i=0 ; i<GGL_TEXTURE_UNIT_COUNT ; ++i) {
680 if (c->state.texture[i].enable) {
681 texture_iterators_t& ti = c->state.texture[i].iterators;
682 if (enables & GGL_ENABLE_W) {
683 tc[i].s = ti.ydsdy;
684 tc[i].t = ti.ydtdy;
685 } else {
686 tc[i].s = (xs * ti.dsdx) + ti.ydsdy;
687 tc[i].t = (xs * ti.dtdx) + ti.ydtdy;
688 }
689 }
690 }
691 }
692
693 pixel_t fragment;
694 pixel_t texel;
695 pixel_t fb;
696
697 uint32_t x = xs;
698 uint32_t y = c->iterators.y;
699
700 while (xc--) {
701
702 { // just a scope
703
704 // read color (convert to 8 bits by keeping only the integer part)
705 fragment.s[1] = fragment.s[2] =
706 fragment.s[3] = fragment.s[0] = 8;
707 fragment.c[1] = r >> (GGL_COLOR_BITS-8);
708 fragment.c[2] = g >> (GGL_COLOR_BITS-8);
709 fragment.c[3] = b >> (GGL_COLOR_BITS-8);
710 fragment.c[0] = a >> (GGL_COLOR_BITS-8);
711
712 // texturing
713 if (enables & GGL_ENABLE_TMUS) {
714 for (int i=0 ; i<GGL_TEXTURE_UNIT_COUNT ; ++i) {
715 texture_t& tx = c->state.texture[i];
716 if (!tx.enable)
717 continue;
718 texture_iterators_t& ti = tx.iterators;
719 int32_t u, v;
720
721 // s-coordinate
722 if (tx.s_coord != GGL_ONE_TO_ONE) {
723 const int w = tx.surface.width;
724 u = wrapping(tc[i].s, w, tx.s_wrap);
725 tc[i].s += ti.dsdx;
726 } else {
727 u = (((tx.shade.is0>>16) + x)<<16) + FIXED_HALF;
728 }
729
730 // t-coordinate
731 if (tx.t_coord != GGL_ONE_TO_ONE) {
732 const int h = tx.surface.height;
733 v = wrapping(tc[i].t, h, tx.t_wrap);
734 tc[i].t += ti.dtdx;
735 } else {
736 v = (((tx.shade.it0>>16) + y)<<16) + FIXED_HALF;
737 }
738
739 // read texture
740 if (tx.mag_filter == GGL_NEAREST &&
741 tx.min_filter == GGL_NEAREST)
742 {
743 u >>= 16;
744 v >>= 16;
745 tx.surface.read(&tx.surface, c, u, v, &texel);
746 } else {
747 const int w = tx.surface.width;
748 const int h = tx.surface.height;
749 u -= FIXED_HALF;
750 v -= FIXED_HALF;
751 int u0 = u >> 16;
752 int v0 = v >> 16;
753 int u1 = u0 + 1;
754 int v1 = v0 + 1;
755 if (tx.s_wrap == GGL_REPEAT) {
756 if (u0<0) u0 += w;
757 if (u1<0) u1 += w;
758 if (u0>=w) u0 -= w;
759 if (u1>=w) u1 -= w;
760 } else {
761 if (u0<0) u0 = 0;
762 if (u1<0) u1 = 0;
763 if (u0>=w) u0 = w-1;
764 if (u1>=w) u1 = w-1;
765 }
766 if (tx.t_wrap == GGL_REPEAT) {
767 if (v0<0) v0 += h;
768 if (v1<0) v1 += h;
769 if (v0>=h) v0 -= h;
770 if (v1>=h) v1 -= h;
771 } else {
772 if (v0<0) v0 = 0;
773 if (v1<0) v1 = 0;
774 if (v0>=h) v0 = h-1;
775 if (v1>=h) v1 = h-1;
776 }
777 pixel_t texels[4];
778 uint32_t mm[4];
779 tx.surface.read(&tx.surface, c, u0, v0, &texels[0]);
780 tx.surface.read(&tx.surface, c, u0, v1, &texels[1]);
781 tx.surface.read(&tx.surface, c, u1, v0, &texels[2]);
782 tx.surface.read(&tx.surface, c, u1, v1, &texels[3]);
783 u = (u >> 12) & 0xF;
784 v = (v >> 12) & 0xF;
785 u += u>>3;
786 v += v>>3;
787 mm[0] = (0x10 - u) * (0x10 - v);
788 mm[1] = (0x10 - u) * v;
789 mm[2] = u * (0x10 - v);
790 mm[3] = 0x100 - (mm[0] + mm[1] + mm[2]);
791 for (int j=0 ; j<4 ; j++) {
792 texel.s[j] = texels[0].s[j];
793 if (!texel.s[j]) continue;
794 texel.s[j] += 8;
795 texel.c[j] = texels[0].c[j]*mm[0] +
796 texels[1].c[j]*mm[1] +
797 texels[2].c[j]*mm[2] +
798 texels[3].c[j]*mm[3] ;
799 }
800 }
801
802 // Texture environment...
803 for (int j=0 ; j<4 ; j++) {
804 uint32_t& Cf = fragment.c[j];
805 uint32_t& Ct = texel.c[j];
806 uint8_t& sf = fragment.s[j];
807 uint8_t& st = texel.s[j];
808 uint32_t At = texel.c[0];
809 uint8_t sat = texel.s[0];
810 switch (tx.env) {
811 case GGL_REPLACE:
812 if (st) {
813 Cf = Ct;
814 sf = st;
815 }
816 break;
817 case GGL_MODULATE:
818 if (st) {
819 uint32_t factor = Ct + (Ct>>(st-1));
820 Cf = (Cf * factor) >> st;
821 }
822 break;
823 case GGL_DECAL:
824 if (sat) {
825 rescale(Cf, sf, Ct, st);
826 Cf += ((Ct - Cf) * (At + (At>>(sat-1)))) >> sat;
827 }
828 break;
829 case GGL_BLEND:
830 if (st) {
831 uint32_t Cc = tx.env_color[i];
832 if (sf>8) Cc = (Cc * ((1<<sf)-1))>>8;
833 else if (sf<8) Cc = (Cc - (Cc>>(8-sf)))>>(8-sf);
834 uint32_t factor = Ct + (Ct>>(st-1));
835 Cf = ((((1<<st) - factor) * Cf) + Ct*Cc)>>st;
836 }
837 break;
838 case GGL_ADD:
839 if (st) {
840 rescale(Cf, sf, Ct, st);
841 Cf += Ct;
842 }
843 break;
844 }
845 }
846 }
847 }
848
849 // coverage application
850 if (enables & GGL_ENABLE_AA) {
851 int16_t cf = *covPtr++;
852 fragment.c[0] = (int64_t(fragment.c[0]) * cf) >> 15;
853 }
854
855 // alpha-test
856 if (enables & GGL_ENABLE_ALPHA_TEST) {
857 GGLcolor ref = c->state.alpha_test.ref;
858 GGLcolor alpha = (uint64_t(fragment.c[0]) *
859 ((1<<GGL_COLOR_BITS)-1)) / ((1<<fragment.s[0])-1);
860 switch (c->state.alpha_test.func) {
861 case GGL_NEVER: goto discard;
862 case GGL_LESS: if (alpha<ref) break; goto discard;
863 case GGL_EQUAL: if (alpha==ref) break; goto discard;
864 case GGL_LEQUAL: if (alpha<=ref) break; goto discard;
865 case GGL_GREATER: if (alpha>ref) break; goto discard;
866 case GGL_NOTEQUAL: if (alpha!=ref) break; goto discard;
867 case GGL_GEQUAL: if (alpha>=ref) break; goto discard;
868 }
869 }
870
871 // depth test
872 if (c->state.buffers.depth.format) {
873 if (enables & GGL_ENABLE_DEPTH_TEST) {
874 surface_t* cb = &(c->state.buffers.depth);
875 uint16_t* p = (uint16_t*)(cb->data)+(x+(cb->stride*y));
876 uint16_t zz = uint32_t(z)>>(16);
877 uint16_t depth = *p;
878 switch (c->state.depth_test.func) {
879 case GGL_NEVER: goto discard;
880 case GGL_LESS: if (zz<depth) break; goto discard;
881 case GGL_EQUAL: if (zz==depth) break; goto discard;
882 case GGL_LEQUAL: if (zz<=depth) break; goto discard;
883 case GGL_GREATER: if (zz>depth) break; goto discard;
884 case GGL_NOTEQUAL: if (zz!=depth) break; goto discard;
885 case GGL_GEQUAL: if (zz>=depth) break; goto discard;
886 }
887 // the depth buffer is only written when the depth test is enabled
888 /*
889 fragment.s[1] = fragment.s[2] =
890 fragment.s[3] = fragment.s[0] = 8;
891 fragment.c[1] =
892 fragment.c[2] =
893 fragment.c[3] =
894 fragment.c[0] = 255 - (zz>>8);
895 */
896 if (c->state.mask.depth) {
897 *p = zz;
898 }
899 }
900 }
901
902 // fog
903 if (enables & GGL_ENABLE_FOG) {
904 for (int i=1 ; i<=3 ; i++) {
905 GGLfixed fc = (c->state.fog.color[i] * 0x10000) / 0xFF;
906 uint32_t& c = fragment.c[i];
907 uint8_t& s = fragment.s[i];
908 c = (c * 0x10000) / ((1<<s)-1);
909 c = gglMulAddx(c, f, gglMulx(fc, 0x10000 - f));
910 s = 16;
911 }
912 }
913
914 // blending
915 if (enables & GGL_ENABLE_BLENDING) {
916 fb.c[1] = fb.c[2] = fb.c[3] = fb.c[0] = 0; // placate valgrind
917 fb.s[1] = fb.s[2] = fb.s[3] = fb.s[0] = 0;
918 c->state.buffers.color.read(
919 &(c->state.buffers.color), c, x, y, &fb);
920 blending( c, &fragment, &fb );
921 }
922
923 // write
924 c->state.buffers.color.write(
925 &(c->state.buffers.color), c, x, y, &fragment);
926 }
927
928 discard:
929 // iterate...
930 x += 1;
931 if (enables & GGL_ENABLE_SMOOTH) {
932 r += c->shade.drdx;
933 g += c->shade.dgdx;
934 b += c->shade.dbdx;
935 a += c->shade.dadx;
936 }
937 z += c->shade.dzdx;
938 f += c->shade.dfdx;
939 }
940 }
941
942 #endif // ANDROID_ARM_CODEGEN && (ANDROID_CODEGEN == ANDROID_CODEGEN_GENERATED)
943
944 // ----------------------------------------------------------------------------
945 #if 0
946 #pragma mark -
947 #pragma mark Scanline
948 #endif
949
950 /* Used to parse a 32-bit source texture linearly. Usage is:
951 *
952 * horz_iterator32 hi(context);
953 * while (...) {
954 * uint32_t src_pixel = hi.get_pixel32();
955 * ...
956 * }
957 *
958 * Use only for one-to-one texture mapping.
959 */
960 struct horz_iterator32 {
961 horz_iterator32(context_t* c) {
962 const int x = c->iterators.xl;
963 const int y = c->iterators.y;
964 texture_t& tx = c->state.texture[0];
965 const int32_t u = (tx.shade.is0>>16) + x;
966 const int32_t v = (tx.shade.it0>>16) + y;
967 m_src = reinterpret_cast<uint32_t*>(tx.surface.data)+(u+(tx.surface.stride*v));
968 }
969 uint32_t get_pixel32() {
970 return *m_src++;
971 }
972 protected:
973 uint32_t* m_src;
974 };
975
976 /* A variant for 16-bit source textures. */
977 struct horz_iterator16 {
978 horz_iterator16(context_t* c) {
979 const int x = c->iterators.xl;
980 const int y = c->iterators.y;
981 texture_t& tx = c->state.texture[0];
982 const int32_t u = (tx.shade.is0>>16) + x;
983 const int32_t v = (tx.shade.it0>>16) + y;
984 m_src = reinterpret_cast<uint16_t*>(tx.surface.data)+(u+(tx.surface.stride*v));
985 }
986 uint16_t get_pixel16() {
987 return *m_src++;
988 }
989 protected:
990 uint16_t* m_src;
991 };
992
993 /* A clamp iterator is used to iterate inside a texture with GGL_CLAMP.
994 * After initialization, call get_pixel16() or get_pixel32() to get the
995 * current texture pixel value.
996 */
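/* Typical usage (a sketch mirroring the clamp scanline functions below):
 *
 *   clamp_iterator ci(c);
 *   while (count--) {
 *       uint32_t s = ci.get_pixel32();
 *       ...
 *   }
 */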
997 struct clamp_iterator {
998 clamp_iterator(context_t* c) {
999 const int xs = c->iterators.xl;
1000 texture_t& tx = c->state.texture[0];
1001 texture_iterators_t& ti = tx.iterators;
1002 m_s = (xs * ti.dsdx) + ti.ydsdy;
1003 m_t = (xs * ti.dtdx) + ti.ydtdy;
1004 m_ds = ti.dsdx;
1005 m_dt = ti.dtdx;
1006 m_width_m1 = tx.surface.width - 1;
1007 m_height_m1 = tx.surface.height - 1;
1008 m_data = tx.surface.data;
1009 m_stride = tx.surface.stride;
1010 }
1011 uint16_t get_pixel16() {
1012 int u, v;
1013 get_uv(u, v);
1014 uint16_t* src = reinterpret_cast<uint16_t*>(m_data) + (u + (m_stride*v));
1015 return src[0];
1016 }
1017 uint32_t get_pixel32() {
1018 int u, v;
1019 get_uv(u, v);
1020 uint32_t* src = reinterpret_cast<uint32_t*>(m_data) + (u + (m_stride*v));
1021 return src[0];
1022 }
1023 private:
1024 void get_uv(int& u, int& v) {
1025 int uu = m_s >> 16;
1026 int vv = m_t >> 16;
1027 if (uu < 0)
1028 uu = 0;
1029 if (uu > m_width_m1)
1030 uu = m_width_m1;
1031 if (vv < 0)
1032 vv = 0;
1033 if (vv > m_height_m1)
1034 vv = m_height_m1;
1035 u = uu;
1036 v = vv;
1037 m_s += m_ds;
1038 m_t += m_dt;
1039 }
1040
1041 GGLfixed m_s, m_t;
1042 GGLfixed m_ds, m_dt;
1043 int m_width_m1, m_height_m1;
1044 uint8_t* m_data;
1045 int m_stride;
1046 };
1047
1048 /*
1049 * The 'horizontal clamp iterator' variant corresponds to the case where
1050 * the 'v' coordinate doesn't change. This avoids one multiply and a few
1051 * extra adds/checks per pixel, which matters when the blending/processing
1052 * that follows is very fast.
1053 */
1054 static int is_context_horizontal(const context_t* c) {
1055 return (c->state.texture[0].iterators.dtdx == 0);
1056 }
1057
1058 struct horz_clamp_iterator {
1059 uint16_t get_pixel16() {
1060 int u = m_s >> 16;
1061 m_s += m_ds;
1062 if (u < 0)
1063 u = 0;
1064 if (u > m_width_m1)
1065 u = m_width_m1;
1066 const uint16_t* src = reinterpret_cast<const uint16_t*>(m_data);
1067 return src[u];
1068 }
1069 uint32_t get_pixel32() {
1070 int u = m_s >> 16;
1071 m_s += m_ds;
1072 if (u < 0)
1073 u = 0;
1074 if (u > m_width_m1)
1075 u = m_width_m1;
1076 const uint32_t* src = reinterpret_cast<const uint32_t*>(m_data);
1077 return src[u];
1078 }
1079 protected:
1080 void init(const context_t* c, int shift);
1081 GGLfixed m_s;
1082 GGLfixed m_ds;
1083 int m_width_m1;
1084 const uint8_t* m_data;
1085 };
1086
1087 void horz_clamp_iterator::init(const context_t* c, int shift)
1088 {
1089 const int xs = c->iterators.xl;
1090 const texture_t& tx = c->state.texture[0];
1091 const texture_iterators_t& ti = tx.iterators;
1092 m_s = (xs * ti.dsdx) + ti.ydsdy;
1093 m_ds = ti.dsdx;
1094 m_width_m1 = tx.surface.width-1;
1095 m_data = tx.surface.data;
1096
1097 GGLfixed t = (xs * ti.dtdx) + ti.ydtdy;
1098 int v = t >> 16;
1099 if (v < 0)
1100 v = 0;
1101 else if (v >= (int)tx.surface.height)
1102 v = (int)tx.surface.height-1;
1103
1104 m_data += (tx.surface.stride*v) << shift;
1105 }
1106
1107 struct horz_clamp_iterator16 : horz_clamp_iterator {
1108 horz_clamp_iterator16(const context_t* c) {
1109 init(c,1);
1110 };
1111 };
1112
1113 struct horz_clamp_iterator32 : horz_clamp_iterator {
1114 horz_clamp_iterator32(context_t* c) {
1115 init(c,2);
1116 };
1117 };
1118
1119 /* This is used to perform dithering operations.
1120 */
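/* Typical usage (a sketch mirroring the dithering scanline functions below):
 *
 *   ditherer dither(c);
 *   while (count--) {
 *       uint32_t s = ...;   // 32-bit ABGR source pixel
 *       *dst++ = dither.abgr8888ToRgb565(s);
 *   }
 *
 * get_value() returns the current threshold from the dither matrix and
 * advances to the next position; step() only advances (used when a pixel
 * is skipped).
 */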
1121 struct ditherer {
1122 ditherer(const context_t* c) {
1123 const int x = c->iterators.xl;
1124 const int y = c->iterators.y;
1125 m_line = &c->ditherMatrix[ ((y & GGL_DITHER_MASK)<<GGL_DITHER_ORDER_SHIFT) ];
1126 m_index = x & GGL_DITHER_MASK;
1127 }
1128 void step(void) {
1129 m_index++;
1130 }
1131 int get_value(void) {
1132 int ret = m_line[m_index & GGL_DITHER_MASK];
1133 m_index++;
1134 return ret;
1135 }
1136 uint16_t abgr8888ToRgb565(uint32_t s) {
1137 uint32_t r = s & 0xff;
1138 uint32_t g = (s >> 8) & 0xff;
1139 uint32_t b = (s >> 16) & 0xff;
1140 return rgb888ToRgb565(r,g,b);
1141 }
1142 /* The following assumes that r/g/b are in the 0..255 range each */
1143 uint16_t rgb888ToRgb565(uint32_t& r, uint32_t& g, uint32_t& b) {
1144 int threshold = get_value();
1145 /* the dither threshold is on GGL_DITHER_BITS bits; r, g, b are each on 8 bits */
1146 r += (threshold >> (GGL_DITHER_BITS-8 +5));
1147 g += (threshold >> (GGL_DITHER_BITS-8 +6));
1148 b += (threshold >> (GGL_DITHER_BITS-8 +5));
1149 if (r > 0xff)
1150 r = 0xff;
1151 if (g > 0xff)
1152 g = 0xff;
1153 if (b > 0xff)
1154 b = 0xff;
1155 return uint16_t(((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3));
1156 }
1157 protected:
1158 const uint8_t* m_line;
1159 int m_index;
1160 };
1161
1162 /* This structure is used to blend (SRC_OVER) 32-bit source pixels
1163 * onto 16-bit destination ones. Usage is simply:
1164 *
1165 * blender.blend(<32-bit-src-pixel-value>,<ptr-to-16-bit-dest-pixel>)
1166 */
1167 struct blender_32to16 {
1168 blender_32to16(context_t* /*c*/) { }
1169 void write(uint32_t s, uint16_t* dst) {
1170 if (s == 0)
1171 return;
1172 s = GGL_RGBA_TO_HOST(s);
1173 int sA = (s>>24);
1174 if (sA == 0xff) {
1175 *dst = convertAbgr8888ToRgb565(s);
1176 } else {
1177 int f = 0x100 - (sA + (sA>>7));
1178 int sR = (s >> ( 3))&0x1F;
1179 int sG = (s >> ( 8+2))&0x3F;
1180 int sB = (s >> (16+3))&0x1F;
1181 uint16_t d = *dst;
1182 int dR = (d>>11)&0x1f;
1183 int dG = (d>>5)&0x3f;
1184 int dB = (d)&0x1f;
1185 sR += (f*dR)>>8;
1186 sG += (f*dG)>>8;
1187 sB += (f*dB)>>8;
1188 *dst = uint16_t((sR<<11)|(sG<<5)|sB);
1189 }
1190 }
1191 void write(uint32_t s, uint16_t* dst, ditherer& di) {
1192 if (s == 0) {
1193 di.step();
1194 return;
1195 }
1196 s = GGL_RGBA_TO_HOST(s);
1197 int sA = (s>>24);
1198 if (sA == 0xff) {
1199 *dst = di.abgr8888ToRgb565(s);
1200 } else {
1201 int threshold = di.get_value() << (8 - GGL_DITHER_BITS);
1202 int f = 0x100 - (sA + (sA>>7));
1203 int sR = (s >> ( 3))&0x1F;
1204 int sG = (s >> ( 8+2))&0x3F;
1205 int sB = (s >> (16+3))&0x1F;
1206 uint16_t d = *dst;
1207 int dR = (d>>11)&0x1f;
1208 int dG = (d>>5)&0x3f;
1209 int dB = (d)&0x1f;
1210 sR = ((sR << 8) + f*dR + threshold)>>8;
1211 sG = ((sG << 8) + f*dG + threshold)>>8;
1212 sB = ((sB << 8) + f*dB + threshold)>>8;
1213 if (sR > 0x1f) sR = 0x1f;
1214 if (sG > 0x3f) sG = 0x3f;
1215 if (sB > 0x1f) sB = 0x1f;
1216 *dst = uint16_t((sR<<11)|(sG<<5)|sB);
1217 }
1218 }
1219 };
1220
1221 /* This blender does the same for the 'blend_srca' operation,
1222 * where dstFactor=1-srcA and srcFactor=srcA (non-premultiplied source).
1223 */
1224 struct blender_32to16_srcA {
1225 blender_32to16_srcA(const context_t* /*c*/) { }
1226 void write(uint32_t s, uint16_t* dst) {
1227 if (!s) {
1228 return;
1229 }
1230 uint16_t d = *dst;
1231 s = GGL_RGBA_TO_HOST(s);
1232 int sR = (s >> ( 3))&0x1F;
1233 int sG = (s >> ( 8+2))&0x3F;
1234 int sB = (s >> (16+3))&0x1F;
1235 int sA = (s>>24);
1236 int f1 = (sA + (sA>>7));
1237 int f2 = 0x100-f1;
1238 int dR = (d>>11)&0x1f;
1239 int dG = (d>>5)&0x3f;
1240 int dB = (d)&0x1f;
1241 sR = (f1*sR + f2*dR)>>8;
1242 sG = (f1*sG + f2*dG)>>8;
1243 sB = (f1*sB + f2*dB)>>8;
1244 *dst = uint16_t((sR<<11)|(sG<<5)|sB);
1245 }
1246 };
1247
1248 /* Common init code the modulating blenders */
1249 struct blender_modulate {
1250 void init(const context_t* c) {
1251 const int r = c->iterators.ydrdy >> (GGL_COLOR_BITS-8);
1252 const int g = c->iterators.ydgdy >> (GGL_COLOR_BITS-8);
1253 const int b = c->iterators.ydbdy >> (GGL_COLOR_BITS-8);
1254 const int a = c->iterators.ydady >> (GGL_COLOR_BITS-8);
1255 m_r = r + (r >> 7);
1256 m_g = g + (g >> 7);
1257 m_b = b + (b >> 7);
1258 m_a = a + (a >> 7);
1259 }
1260 protected:
1261 int m_r, m_g, m_b, m_a;
1262 };
1263
1264 /* This blender does a normal blend after modulation.
1265 */
1266 struct blender_32to16_modulate : blender_modulate {
1267 blender_32to16_modulate(const context_t* c) {
1268 init(c);
1269 }
1270 void write(uint32_t s, uint16_t* dst) {
1271 // blend source and destination
1272 if (!s) {
1273 return;
1274 }
1275 s = GGL_RGBA_TO_HOST(s);
1276
1277 /* We need to modulate s */
1278 uint32_t sA = (s >> 24);
1279 uint32_t sB = (s >> 16) & 0xff;
1280 uint32_t sG = (s >> 8) & 0xff;
1281 uint32_t sR = s & 0xff;
1282
1283 sA = (sA*m_a) >> 8;
1284 /* Keep R/G/B scaled to 5.8 or 6.8 fixed-point format */
1285 sR = (sR*m_r) >> (8 - 5);
1286 sG = (sG*m_g) >> (8 - 6);
1287 sB = (sB*m_b) >> (8 - 5);
1288
1289 /* Now do a normal blend */
1290 int f = 0x100 - (sA + (sA>>7));
1291 uint16_t d = *dst;
1292 int dR = (d>>11)&0x1f;
1293 int dG = (d>>5)&0x3f;
1294 int dB = (d)&0x1f;
1295 sR = (sR + f*dR)>>8;
1296 sG = (sG + f*dG)>>8;
1297 sB = (sB + f*dB)>>8;
1298 *dst = uint16_t((sR<<11)|(sG<<5)|sB);
1299 }
1300 void write(uint32_t s, uint16_t* dst, ditherer& di) {
1301 // blend source and destination
1302 if (!s) {
1303 di.step();
1304 return;
1305 }
1306 s = GGL_RGBA_TO_HOST(s);
1307
1308 /* We need to modulate s */
1309 uint32_t sA = (s >> 24);
1310 uint32_t sB = (s >> 16) & 0xff;
1311 uint32_t sG = (s >> 8) & 0xff;
1312 uint32_t sR = s & 0xff;
1313
1314 sA = (sA*m_a) >> 8;
1315 /* keep R/G/B scaled to 5.8 or 6.8 fixed-point format */
1316 sR = (sR*m_r) >> (8 - 5);
1317 sG = (sG*m_g) >> (8 - 6);
1318 sB = (sB*m_b) >> (8 - 5);
1319
1320 /* Scale threshold to 0.8 fixed-point format */
1321 int threshold = di.get_value() << (8 - GGL_DITHER_BITS);
1322 int f = 0x100 - (sA + (sA>>7));
1323 uint16_t d = *dst;
1324 int dR = (d>>11)&0x1f;
1325 int dG = (d>>5)&0x3f;
1326 int dB = (d)&0x1f;
1327 sR = (sR + f*dR + threshold)>>8;
1328 sG = (sG + f*dG + threshold)>>8;
1329 sB = (sB + f*dB + threshold)>>8;
1330 if (sR > 0x1f) sR = 0x1f;
1331 if (sG > 0x3f) sG = 0x3f;
1332 if (sB > 0x1f) sB = 0x1f;
1333 *dst = uint16_t((sR<<11)|(sG<<5)|sB);
1334 }
1335 };
1336
1337 /* same as 32to16_modulate, except that the input is xRGB, instead of ARGB */
1338 struct blender_x32to16_modulate : blender_modulate {
1339 blender_x32to16_modulate(const context_t* c) {
1340 init(c);
1341 }
1342 void write(uint32_t s, uint16_t* dst) {
1343 s = GGL_RGBA_TO_HOST(s);
1344
1345 uint32_t sB = (s >> 16) & 0xff;
1346 uint32_t sG = (s >> 8) & 0xff;
1347 uint32_t sR = s & 0xff;
1348
1349 /* Keep R/G/B in 5.8 or 6.8 format */
1350 sR = (sR*m_r) >> (8 - 5);
1351 sG = (sG*m_g) >> (8 - 6);
1352 sB = (sB*m_b) >> (8 - 5);
1353
1354 int f = 0x100 - m_a;
1355 uint16_t d = *dst;
1356 int dR = (d>>11)&0x1f;
1357 int dG = (d>>5)&0x3f;
1358 int dB = (d)&0x1f;
1359 sR = (sR + f*dR)>>8;
1360 sG = (sG + f*dG)>>8;
1361 sB = (sB + f*dB)>>8;
1362 *dst = uint16_t((sR<<11)|(sG<<5)|sB);
1363 }
1364 void write(uint32_t s, uint16_t* dst, ditherer& di) {
1365 s = GGL_RGBA_TO_HOST(s);
1366
1367 uint32_t sB = (s >> 16) & 0xff;
1368 uint32_t sG = (s >> 8) & 0xff;
1369 uint32_t sR = s & 0xff;
1370
1371 sR = (sR*m_r) >> (8 - 5);
1372 sG = (sG*m_g) >> (8 - 6);
1373 sB = (sB*m_b) >> (8 - 5);
1374
1375 /* Now do a normal blend */
1376 int threshold = di.get_value() << (8 - GGL_DITHER_BITS);
1377 int f = 0x100 - m_a;
1378 uint16_t d = *dst;
1379 int dR = (d>>11)&0x1f;
1380 int dG = (d>>5)&0x3f;
1381 int dB = (d)&0x1f;
1382 sR = (sR + f*dR + threshold)>>8;
1383 sG = (sG + f*dG + threshold)>>8;
1384 sB = (sB + f*dB + threshold)>>8;
1385 if (sR > 0x1f) sR = 0x1f;
1386 if (sG > 0x3f) sG = 0x3f;
1387 if (sB > 0x1f) sB = 0x1f;
1388 *dst = uint16_t((sR<<11)|(sG<<5)|sB);
1389 }
1390 };
1391
1392 /* Same as above, but source is 16bit rgb565 */
1393 struct blender_16to16_modulate : blender_modulate {
1394 blender_16to16_modulate(const context_t* c) {
1395 init(c);
1396 }
1397 void write(uint16_t s16, uint16_t* dst) {
1398 uint32_t s = s16;
1399
1400 uint32_t sR = s >> 11;
1401 uint32_t sG = (s >> 5) & 0x3f;
1402 uint32_t sB = s & 0x1f;
1403
1404 sR = (sR*m_r);
1405 sG = (sG*m_g);
1406 sB = (sB*m_b);
1407
1408 int f = 0x100 - m_a;
1409 uint16_t d = *dst;
1410 int dR = (d>>11)&0x1f;
1411 int dG = (d>>5)&0x3f;
1412 int dB = (d)&0x1f;
1413 sR = (sR + f*dR)>>8;
1414 sG = (sG + f*dG)>>8;
1415 sB = (sB + f*dB)>>8;
1416 *dst = uint16_t((sR<<11)|(sG<<5)|sB);
1417 }
1418 };
1419
1420 /* This is used to iterate over a 16-bit destination color buffer.
1421 * Usage is:
1422 *
1423 * dst_iterator16 di(context);
1424 * while (di.count--) {
1425 * <do stuff with dest pixel at di.dst>
1426 * di.dst++;
1427 * }
1428 */
1429 struct dst_iterator16 {
1430 dst_iterator16(const context_t* c) {
1431 const int x = c->iterators.xl;
1432 const int width = c->iterators.xr - x;
1433 const int32_t y = c->iterators.y;
1434 const surface_t* cb = &(c->state.buffers.color);
1435 count = width;
1436 dst = reinterpret_cast<uint16_t*>(cb->data) + (x+(cb->stride*y));
1437 }
1438 int count;
1439 uint16_t* dst;
1440 };
1441
1442
1443 static void scanline_t32cb16_clamp(context_t* c)
1444 {
1445 dst_iterator16 di(c);
1446
1447 if (is_context_horizontal(c)) {
1448 /* Special case for simple horizontal scaling */
1449 horz_clamp_iterator32 ci(c);
1450 while (di.count--) {
1451 uint32_t s = ci.get_pixel32();
1452 *di.dst++ = convertAbgr8888ToRgb565(s);
1453 }
1454 } else {
1455 /* General case */
1456 clamp_iterator ci(c);
1457 while (di.count--) {
1458 uint32_t s = ci.get_pixel32();
1459 *di.dst++ = convertAbgr8888ToRgb565(s);
1460 }
1461 }
1462 }
1463
1464 static void scanline_t32cb16_dither(context_t* c)
1465 {
1466 horz_iterator32 si(c);
1467 dst_iterator16 di(c);
1468 ditherer dither(c);
1469
1470 while (di.count--) {
1471 uint32_t s = si.get_pixel32();
1472 *di.dst++ = dither.abgr8888ToRgb565(s);
1473 }
1474 }
1475
1476 static void scanline_t32cb16_clamp_dither(context_t* c)
1477 {
1478 dst_iterator16 di(c);
1479 ditherer dither(c);
1480
1481 if (is_context_horizontal(c)) {
1482 /* Special case for simple horizontal scaling */
1483 horz_clamp_iterator32 ci(c);
1484 while (di.count--) {
1485 uint32_t s = ci.get_pixel32();
1486 *di.dst++ = dither.abgr8888ToRgb565(s);
1487 }
1488 } else {
1489 /* General case */
1490 clamp_iterator ci(c);
1491 while (di.count--) {
1492 uint32_t s = ci.get_pixel32();
1493 *di.dst++ = dither.abgr8888ToRgb565(s);
1494 }
1495 }
1496 }
1497
1498 static void scanline_t32cb16blend_dither(context_t* c)
1499 {
1500 dst_iterator16 di(c);
1501 ditherer dither(c);
1502 blender_32to16 bl(c);
1503 horz_iterator32 hi(c);
1504 while (di.count--) {
1505 uint32_t s = hi.get_pixel32();
1506 bl.write(s, di.dst, dither);
1507 di.dst++;
1508 }
1509 }
1510
1511 static void scanline_t32cb16blend_clamp(context_t* c)
1512 {
1513 dst_iterator16 di(c);
1514 blender_32to16 bl(c);
1515
1516 if (is_context_horizontal(c)) {
1517 horz_clamp_iterator32 ci(c);
1518 while (di.count--) {
1519 uint32_t s = ci.get_pixel32();
1520 bl.write(s, di.dst);
1521 di.dst++;
1522 }
1523 } else {
1524 clamp_iterator ci(c);
1525 while (di.count--) {
1526 uint32_t s = ci.get_pixel32();
1527 bl.write(s, di.dst);
1528 di.dst++;
1529 }
1530 }
1531 }
1532
1533 static void scanline_t32cb16blend_clamp_dither(context_t* c)
1534 {
1535 dst_iterator16 di(c);
1536 ditherer dither(c);
1537 blender_32to16 bl(c);
1538
1539 clamp_iterator ci(c);
1540 while (di.count--) {
1541 uint32_t s = ci.get_pixel32();
1542 bl.write(s, di.dst, dither);
1543 di.dst++;
1544 }
1545 }
1546
1547 void scanline_t32cb16blend_clamp_mod(context_t* c)
1548 {
1549 dst_iterator16 di(c);
1550 blender_32to16_modulate bl(c);
1551
1552 clamp_iterator ci(c);
1553 while (di.count--) {
1554 uint32_t s = ci.get_pixel32();
1555 bl.write(s, di.dst);
1556 di.dst++;
1557 }
1558 }
1559
1560 void scanline_t32cb16blend_clamp_mod_dither(context_t* c)
1561 {
1562 dst_iterator16 di(c);
1563 blender_32to16_modulate bl(c);
1564 ditherer dither(c);
1565
1566 clamp_iterator ci(c);
1567 while (di.count--) {
1568 uint32_t s = ci.get_pixel32();
1569 bl.write(s, di.dst, dither);
1570 di.dst++;
1571 }
1572 }
1573
1574 /* Variant of scanline_t32cb16blend_clamp_mod with a xRGB texture */
1575 void scanline_x32cb16blend_clamp_mod(context_t* c)
1576 {
1577 dst_iterator16 di(c);
1578 blender_x32to16_modulate bl(c);
1579
1580 clamp_iterator ci(c);
1581 while (di.count--) {
1582 uint32_t s = ci.get_pixel32();
1583 bl.write(s, di.dst);
1584 di.dst++;
1585 }
1586 }
1587
1588 void scanline_x32cb16blend_clamp_mod_dither(context_t* c)
1589 {
1590 dst_iterator16 di(c);
1591 blender_x32to16_modulate bl(c);
1592 ditherer dither(c);
1593
1594 clamp_iterator ci(c);
1595 while (di.count--) {
1596 uint32_t s = ci.get_pixel32();
1597 bl.write(s, di.dst, dither);
1598 di.dst++;
1599 }
1600 }
1601
1602 void scanline_t16cb16_clamp(context_t* c)
1603 {
1604 dst_iterator16 di(c);
1605
1606 /* Special case for simple horizontal scaling */
1607 if (is_context_horizontal(c)) {
1608 horz_clamp_iterator16 ci(c);
1609 while (di.count--) {
1610 *di.dst++ = ci.get_pixel16();
1611 }
1612 } else {
1613 clamp_iterator ci(c);
1614 while (di.count--) {
1615 *di.dst++ = ci.get_pixel16();
1616 }
1617 }
1618 }
1619
1620
1621
1622 template <typename T, typename U>
1623 static inline __attribute__((const))
1624 T interpolate(int y, T v0, U dvdx, U dvdy) {
1625 // interpolates in pixel's centers
1626 // v = v0 + (y + 0.5) * dvdy + (0.5 * dvdx)
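// = (y * dvdy) + v0 + (dvdy + dvdx)/2, which is what the integer
// expression below computes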
1627 return (y * dvdy) + (v0 + ((dvdy + dvdx) >> 1));
1628 }
1629
1630 // ----------------------------------------------------------------------------
1631 #if 0
1632 #pragma mark -
1633 #endif
1634
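/* Set up all per-scanline iterators for row 'ys': color, z/w/fog and
 * texture-coordinate iterators, the mirrored 'generated_vars' used by
 * generated code, and finally the y-stepper and the rectangle blitter.
 */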
1635 void init_y(context_t* c, int32_t ys)
1636 {
1637 const uint32_t enables = c->state.enables;
1638
1639 // compute iterators...
1640 iterators_t& ci = c->iterators;
1641
1642 // sample in the center
1643 ci.y = ys;
1644
1645 if (enables & (GGL_ENABLE_DEPTH_TEST|GGL_ENABLE_W|GGL_ENABLE_FOG)) {
1646 ci.ydzdy = interpolate(ys, c->shade.z0, c->shade.dzdx, c->shade.dzdy);
1647 ci.ydwdy = interpolate(ys, c->shade.w0, c->shade.dwdx, c->shade.dwdy);
1648 ci.ydfdy = interpolate(ys, c->shade.f0, c->shade.dfdx, c->shade.dfdy);
1649 }
1650
1651 if (ggl_unlikely(enables & GGL_ENABLE_SMOOTH)) {
1652 ci.ydrdy = interpolate(ys, c->shade.r0, c->shade.drdx, c->shade.drdy);
1653 ci.ydgdy = interpolate(ys, c->shade.g0, c->shade.dgdx, c->shade.dgdy);
1654 ci.ydbdy = interpolate(ys, c->shade.b0, c->shade.dbdx, c->shade.dbdy);
1655 ci.ydady = interpolate(ys, c->shade.a0, c->shade.dadx, c->shade.dady);
1656 c->step_y = step_y__smooth;
1657 } else {
1658 ci.ydrdy = c->shade.r0;
1659 ci.ydgdy = c->shade.g0;
1660 ci.ydbdy = c->shade.b0;
1661 ci.ydady = c->shade.a0;
1662 // XXX: do only if needed, or make sure this is fast
1663 c->packed = ggl_pack_color(c, c->state.buffers.color.format,
1664 ci.ydrdy, ci.ydgdy, ci.ydbdy, ci.ydady);
1665 c->packed8888 = ggl_pack_color(c, GGL_PIXEL_FORMAT_RGBA_8888,
1666 ci.ydrdy, ci.ydgdy, ci.ydbdy, ci.ydady);
1667 }
1668
1669 // initialize the variables we need in the shader
1670 generated_vars_t& gen = c->generated_vars;
1671 gen.argb[GGLFormat::ALPHA].c = ci.ydady;
1672 gen.argb[GGLFormat::ALPHA].dx = c->shade.dadx;
1673 gen.argb[GGLFormat::RED ].c = ci.ydrdy;
1674 gen.argb[GGLFormat::RED ].dx = c->shade.drdx;
1675 gen.argb[GGLFormat::GREEN].c = ci.ydgdy;
1676 gen.argb[GGLFormat::GREEN].dx = c->shade.dgdx;
1677 gen.argb[GGLFormat::BLUE ].c = ci.ydbdy;
1678 gen.argb[GGLFormat::BLUE ].dx = c->shade.dbdx;
1679 gen.dzdx = c->shade.dzdx;
1680 gen.f = ci.ydfdy;
1681 gen.dfdx = c->shade.dfdx;
1682
1683 if (enables & GGL_ENABLE_TMUS) {
1684 for (int i=0 ; i<GGL_TEXTURE_UNIT_COUNT ; ++i) {
1685 texture_t& t = c->state.texture[i];
1686 if (!t.enable) continue;
1687
1688 texture_iterators_t& ti = t.iterators;
1689 if (t.s_coord == GGL_ONE_TO_ONE && t.t_coord == GGL_ONE_TO_ONE) {
1690 // we need to set all of these to 0 because in some cases
1691 // step_y__generic() or step_y__tmu() will be used and would
1692 // update dtdy; however, in 1:1 mode this is always handled
1693 // by the scanline rasterizer.
1694 ti.dsdx = ti.dsdy = ti.dtdx = ti.dtdy = 0;
1695 ti.ydsdy = t.shade.is0;
1696 ti.ydtdy = t.shade.it0;
1697 } else {
1698 const int adjustSWrap = ((t.s_wrap==GGL_CLAMP)?0:16);
1699 const int adjustTWrap = ((t.t_wrap==GGL_CLAMP)?0:16);
1700 ti.sscale = t.shade.sscale + adjustSWrap;
1701 ti.tscale = t.shade.tscale + adjustTWrap;
1702 if (!(enables & GGL_ENABLE_W)) {
1703 // S coordinate
1704 const int32_t sscale = ti.sscale;
1705 const int32_t sy = interpolate(ys,
1706 t.shade.is0, t.shade.idsdx, t.shade.idsdy);
1707 if (sscale>=0) {
1708 ti.ydsdy= sy << sscale;
1709 ti.dsdx = t.shade.idsdx << sscale;
1710 ti.dsdy = t.shade.idsdy << sscale;
1711 } else {
1712 ti.ydsdy= sy >> -sscale;
1713 ti.dsdx = t.shade.idsdx >> -sscale;
1714 ti.dsdy = t.shade.idsdy >> -sscale;
1715 }
1716 // T coordinate
1717 const int32_t tscale = ti.tscale;
1718 const int32_t ty = interpolate(ys,
1719 t.shade.it0, t.shade.idtdx, t.shade.idtdy);
1720 if (tscale>=0) {
1721 ti.ydtdy= ty << tscale;
1722 ti.dtdx = t.shade.idtdx << tscale;
1723 ti.dtdy = t.shade.idtdy << tscale;
1724 } else {
1725 ti.ydtdy= ty >> -tscale;
1726 ti.dtdx = t.shade.idtdx >> -tscale;
1727 ti.dtdy = t.shade.idtdy >> -tscale;
1728 }
1729 }
1730 }
1731 // mirror for generated code...
1732 generated_tex_vars_t& gen = c->generated_vars.texture[i];
1733 gen.width = t.surface.width;
1734 gen.height = t.surface.height;
1735 gen.stride = t.surface.stride;
1736 gen.data = uintptr_t(t.surface.data);
1737 gen.dsdx = ti.dsdx;
1738 gen.dtdx = ti.dtdx;
1739 }
1740 }
1741
1742 // choose the y-stepper
1743 c->step_y = step_y__nop;
1744 if (enables & GGL_ENABLE_FOG) {
1745 c->step_y = step_y__generic;
1746 } else if (enables & GGL_ENABLE_TMUS) {
1747 if (enables & GGL_ENABLE_SMOOTH) {
1748 c->step_y = step_y__generic;
1749 } else if (enables & GGL_ENABLE_W) {
1750 c->step_y = step_y__w;
1751 } else {
1752 c->step_y = step_y__tmu;
1753 }
1754 } else {
1755 if (enables & GGL_ENABLE_SMOOTH) {
1756 c->step_y = step_y__smooth;
1757 }
1758 }
1759
1760 // choose the rectangle blitter
1761 c->rect = rect_generic;
1762 if ((c->step_y == step_y__nop) &&
1763 (c->scanline == scanline_memcpy))
1764 {
1765 c->rect = rect_memcpy;
1766 }
1767 }
1768
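// init_y_packed() is the fast-path setup used when the scanline only writes
// a constant color: the iterated color is pre-packed into both the
// color-buffer format (c->packed) and RGBA_8888 (c->packed8888) so the
// memset/blend scanlines can consume it directly, and the no-op y-stepper
// is installed since nothing is iterated per row.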
void init_y_packed(context_t* c, int32_t y0)
{
    uint8_t f = c->state.buffers.color.format;
    c->packed = ggl_pack_color(c, f,
            c->shade.r0, c->shade.g0, c->shade.b0, c->shade.a0);
    c->packed8888 = ggl_pack_color(c, GGL_PIXEL_FORMAT_RGBA_8888,
            c->shade.r0, c->shade.g0, c->shade.b0, c->shade.a0);
    c->iterators.y = y0;
    c->step_y = step_y__nop;
    // choose the rectangle blitter
    c->rect = rect_generic;
    if (c->scanline == scanline_memcpy) {
        c->rect = rect_memcpy;
    }
}

void init_y_noop(context_t* c, int32_t y0)
{
    c->iterators.y = y0;
    c->step_y = step_y__nop;
    // choose the rectangle blitter
    c->rect = rect_generic;
    if (c->scanline == scanline_memcpy) {
        c->rect = rect_memcpy;
    }
}

void init_y_error(context_t* c, int32_t y0)
{
    // whoops, this should never happen;
    // fail gracefully (don't display anything)
    init_y_noop(c, y0);
    ALOGE("color-buffer has an invalid format!");
}

// ----------------------------------------------------------------------------
#if 0
#pragma mark -
#endif

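// The step_y__*() variants below advance the per-scanline iterators by one
// row, i.e. they add the dy gradients to the y-interpolated values.
// init_y() installs the cheapest variant that still updates everything the
// enabled state needs, so the per-row cost is only paid for features that
// are actually turned on.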
void step_y__generic(context_t* c)
{
    const uint32_t enables = c->state.enables;

    // iterate...
    iterators_t& ci = c->iterators;
    ci.y += 1;

    if (enables & GGL_ENABLE_SMOOTH) {
        ci.ydrdy += c->shade.drdy;
        ci.ydgdy += c->shade.dgdy;
        ci.ydbdy += c->shade.dbdy;
        ci.ydady += c->shade.dady;
    }

    const uint32_t mask =
            GGL_ENABLE_DEPTH_TEST |
            GGL_ENABLE_W |
            GGL_ENABLE_FOG;
    if (enables & mask) {
        ci.ydzdy += c->shade.dzdy;
        ci.ydwdy += c->shade.dwdy;
        ci.ydfdy += c->shade.dfdy;
    }

    if ((enables & GGL_ENABLE_TMUS) && (!(enables & GGL_ENABLE_W))) {
        for (int i=0 ; i<GGL_TEXTURE_UNIT_COUNT ; ++i) {
            if (c->state.texture[i].enable) {
                texture_iterators_t& ti = c->state.texture[i].iterators;
                ti.ydsdy += ti.dsdy;
                ti.ydtdy += ti.dtdy;
            }
        }
    }
}

void step_y__nop(context_t* c)
{
    c->iterators.y += 1;
    c->iterators.ydzdy += c->shade.dzdy;
}

void step_y__smooth(context_t* c)
{
    iterators_t& ci = c->iterators;
    ci.y += 1;
    ci.ydrdy += c->shade.drdy;
    ci.ydgdy += c->shade.dgdy;
    ci.ydbdy += c->shade.dbdy;
    ci.ydady += c->shade.dady;
    ci.ydzdy += c->shade.dzdy;
}

void step_y__w(context_t* c)
{
    iterators_t& ci = c->iterators;
    ci.y += 1;
    ci.ydzdy += c->shade.dzdy;
    ci.ydwdy += c->shade.dwdy;
}

void step_y__tmu(context_t* c)
{
    iterators_t& ci = c->iterators;
    ci.y += 1;
    ci.ydzdy += c->shade.dzdy;
    for (int i=0 ; i<GGL_TEXTURE_UNIT_COUNT ; ++i) {
        if (c->state.texture[i].enable) {
            texture_iterators_t& ti = c->state.texture[i].iterators;
            ti.ydsdy += ti.dsdy;
            ti.ydtdy += ti.dtdy;
        }
    }
}

// ----------------------------------------------------------------------------
#if 0
#pragma mark -
#endif

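// Perspective-correct texturing: instead of computing 1/w for every pixel,
// the scanline is cut into spans of 2^SPAN_BITS pixels (32 with SPAN_BITS=5).
// gglRecipQ() evaluates 1/w only at span boundaries, s/w and t/w are
// perspective-divided there, and pixels inside a span get linearly
// interpolated gradients (dsdx/dtdx), which c->span() then consumes.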
void scanline_perspective(context_t* c)
{
    struct {
        union {
            struct {
                int32_t s, sq;
                int32_t t, tq;
            } sqtq;
            struct {
                int32_t v, q;
            } st[2];
        };
    } tc[GGL_TEXTURE_UNIT_COUNT] __attribute__((aligned(16)));

    // XXX: we should have a special case when dwdx = 0

    // 32-pixel spans work okay. 16 is a lot better,
    // but hey, it's a software renderer...
    const uint32_t SPAN_BITS = 5;
    const uint32_t ys = c->iterators.y;
    const uint32_t xs = c->iterators.xl;
    const uint32_t x1 = c->iterators.xr;
    const uint32_t xc = x1 - xs;
    uint32_t remainder = xc & ((1<<SPAN_BITS)-1);
    uint32_t numSpans = xc >> SPAN_BITS;

    const iterators_t& ci = c->iterators;
    int32_t w0 = (xs * c->shade.dwdx) + ci.ydwdy;
    int32_t q0 = gglRecipQ(w0, 30);
    const int iwscale = 32 - gglClz(q0);

    const int32_t dwdx = c->shade.dwdx << SPAN_BITS;
    int32_t xl = c->iterators.xl;

    // We process s & t with a loop to reduce the code size
    // (and i-cache pressure).

    for (int i=0 ; i<GGL_TEXTURE_UNIT_COUNT ; ++i) {
        const texture_t& tmu = c->state.texture[i];
        if (!tmu.enable) continue;
        int32_t s = tmu.shade.is0 +
                (tmu.shade.idsdy * ys) + (tmu.shade.idsdx * xs) +
                ((tmu.shade.idsdx + tmu.shade.idsdy)>>1);
        int32_t t = tmu.shade.it0 +
                (tmu.shade.idtdy * ys) + (tmu.shade.idtdx * xs) +
                ((tmu.shade.idtdx + tmu.shade.idtdy)>>1);
        tc[i].sqtq.s  = s;
        tc[i].sqtq.t  = t;
        tc[i].sqtq.sq = gglMulx(s, q0, iwscale);
        tc[i].sqtq.tq = gglMulx(t, q0, iwscale);
    }

    int32_t span = 0;
    do {
        int32_t w1;
        if (ggl_likely(numSpans)) {
            w1 = w0 + dwdx;
        } else {
            if (remainder) {
                // finish off the scanline...
                span = remainder;
                w1 = (c->shade.dwdx * span) + w0;
            } else {
                break;
            }
        }
        int32_t q1 = gglRecipQ(w1, 30);
        for (int i=0 ; i<GGL_TEXTURE_UNIT_COUNT ; ++i) {
            texture_t& tmu = c->state.texture[i];
            if (!tmu.enable) continue;
            texture_iterators_t& ti = tmu.iterators;

            for (int j=0 ; j<2 ; j++) {
                int32_t v = tc[i].st[j].v;
                if (span) v += (tmu.shade.st[j].dx)*span;
                else      v += (tmu.shade.st[j].dx)<<SPAN_BITS;
                const int32_t v0 = tc[i].st[j].q;
                const int32_t v1 = gglMulx(v, q1, iwscale);
                int32_t dvdx = v1 - v0;
                if (span) dvdx /= span;
                else      dvdx >>= SPAN_BITS;
                tc[i].st[j].v = v;
                tc[i].st[j].q = v1;

                const int scale = ti.st[j].scale + (iwscale - 30);
                if (scale >= 0) {
                    ti.st[j].ydvdy = v0   << scale;
                    ti.st[j].dvdx  = dvdx << scale;
                } else {
                    ti.st[j].ydvdy = v0   >> -scale;
                    ti.st[j].dvdx  = dvdx >> -scale;
                }
            }
            generated_tex_vars_t& gen = c->generated_vars.texture[i];
            gen.dsdx = ti.st[0].dvdx;
            gen.dtdx = ti.st[1].dvdx;
        }
        c->iterators.xl = xl;
        c->iterators.xr = xl = xl + (span ? span : (1<<SPAN_BITS));
        w0 = w1;
        q0 = q1;
        c->span(c);
    } while(numSpans--);
}

void scanline_perspective_single(context_t* c)
{
    // 32-pixel spans work okay. 16 is a lot better,
    // but hey, it's a software renderer...
    const uint32_t SPAN_BITS = 5;
    const uint32_t ys = c->iterators.y;
    const uint32_t xs = c->iterators.xl;
    const uint32_t x1 = c->iterators.xr;
    const uint32_t xc = x1 - xs;

    const iterators_t& ci = c->iterators;
    int32_t w = (xs * c->shade.dwdx) + ci.ydwdy;
    int32_t iw = gglRecipQ(w, 30);
    const int iwscale = 32 - gglClz(iw);

    const int i = 31 - gglClz(c->state.enabled_tmu);
    generated_tex_vars_t& gen = c->generated_vars.texture[i];
    texture_t& tmu = c->state.texture[i];
    texture_iterators_t& ti = tmu.iterators;
    const int sscale = ti.sscale + (iwscale - 30);
    const int tscale = ti.tscale + (iwscale - 30);
    int32_t s = tmu.shade.is0 +
            (tmu.shade.idsdy * ys) + (tmu.shade.idsdx * xs) +
            ((tmu.shade.idsdx + tmu.shade.idsdy)>>1);
    int32_t t = tmu.shade.it0 +
            (tmu.shade.idtdy * ys) + (tmu.shade.idtdx * xs) +
            ((tmu.shade.idtdx + tmu.shade.idtdy)>>1);
    int32_t s0 = gglMulx(s, iw, iwscale);
    int32_t t0 = gglMulx(t, iw, iwscale);
    int32_t xl = c->iterators.xl;

    int32_t sq, tq, dsdx, dtdx;
    int32_t premainder = xc & ((1<<SPAN_BITS)-1);
    uint32_t numSpans = xc >> SPAN_BITS;
    if (c->shade.dwdx == 0) {
        // XXX: we could choose to do this if the error is small enough
        numSpans = 0;
        premainder = xc;
        goto no_perspective;
    }

    if (premainder) {
        w += c->shade.dwdx   * premainder;
        iw = gglRecipQ(w, 30);
no_perspective:
        s += tmu.shade.idsdx * premainder;
        t += tmu.shade.idtdx * premainder;
        sq = gglMulx(s, iw, iwscale);
        tq = gglMulx(t, iw, iwscale);
        dsdx = (sq - s0) / premainder;
        dtdx = (tq - t0) / premainder;
        c->iterators.xl = xl;
        c->iterators.xr = xl = xl + premainder;
        goto finish;
    }

    while (numSpans--) {
        w += c->shade.dwdx   << SPAN_BITS;
        s += tmu.shade.idsdx << SPAN_BITS;
        t += tmu.shade.idtdx << SPAN_BITS;
        iw = gglRecipQ(w, 30);
        sq = gglMulx(s, iw, iwscale);
        tq = gglMulx(t, iw, iwscale);
        dsdx = (sq - s0) >> SPAN_BITS;
        dtdx = (tq - t0) >> SPAN_BITS;
        c->iterators.xl = xl;
        c->iterators.xr = xl = xl + (1<<SPAN_BITS);
finish:
        if (sscale >= 0) {
            ti.ydsdy = s0   << sscale;
            ti.dsdx  = dsdx << sscale;
        } else {
            ti.ydsdy = s0   >> -sscale;
            ti.dsdx  = dsdx >> -sscale;
        }
        if (tscale >= 0) {
            ti.ydtdy = t0   << tscale;
            ti.dtdx  = dtdx << tscale;
        } else {
            ti.ydtdy = t0   >> -tscale;
            ti.dtdx  = dtdx >> -tscale;
        }
        s0 = sq;
        t0 = tq;
        gen.dsdx = ti.dsdx;
        gen.dtdx = ti.dtdx;
        c->span(c);
    }
}

// ----------------------------------------------------------------------------

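// Blends a constant 32-bit color (c->packed8888) into a 16-bit RGB565 color
// buffer. On ARM/AArch64 builds with ANDROID_CODEGEN_ASM or better this
// dispatches to the hand-written NEON/assembly helpers; the generic C loop
// below unpacks each 565 destination pixel, scales it by (0x100 - alpha),
// adds the source channels, and repacks.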
void scanline_col32cb16blend(context_t* c)
{
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    union {
        uint16_t* dst;
        uint32_t* dst32;
    };
    dst = reinterpret_cast<uint16_t*>(cb->data) + (x+(cb->stride*y));

#if ((ANDROID_CODEGEN >= ANDROID_CODEGEN_ASM) && defined(__arm__))
#if defined(__ARM_HAVE_NEON) && BYTE_ORDER == LITTLE_ENDIAN
    scanline_col32cb16blend_neon(dst, &(c->packed8888), ct);
#else  // defined(__ARM_HAVE_NEON) && BYTE_ORDER == LITTLE_ENDIAN
    scanline_col32cb16blend_arm(dst, GGL_RGBA_TO_HOST(c->packed8888), ct);
#endif // defined(__ARM_HAVE_NEON) && BYTE_ORDER == LITTLE_ENDIAN
#elif ((ANDROID_CODEGEN >= ANDROID_CODEGEN_ASM) && defined(__aarch64__))
    scanline_col32cb16blend_arm64(dst, GGL_RGBA_TO_HOST(c->packed8888), ct);
#else
    uint32_t s = GGL_RGBA_TO_HOST(c->packed8888);
    int sA = (s>>24);
    int f = 0x100 - (sA + (sA>>7));
    while (ct--) {
        uint16_t d = *dst;
        int dR = (d>>11)&0x1f;
        int dG = (d>>5)&0x3f;
        int dB = (d)&0x1f;
        int sR = (s >> (   3))&0x1F;
        int sG = (s >> ( 8+2))&0x3F;
        int sB = (s >> (16+3))&0x1F;
        sR += (f*dR)>>8;
        sG += (f*dG)>>8;
        sB += (f*dB)>>8;
        *dst++ = uint16_t((sR<<11)|(sG<<5)|sB);
    }
#endif

}

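// 1:1 copy of a 32-bit RGBA texture into a 16-bit RGB565 destination (no
// blending). A single pixel is converted first if the destination is not
// 32-bit aligned (or the count is 1); the main loop then converts and
// stores two 565 pixels at a time through the dst32 alias.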
void scanline_t32cb16(context_t* c)
{
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    union {
        uint16_t* dst;
        uint32_t* dst32;
    };
    dst = reinterpret_cast<uint16_t*>(cb->data) + (x+(cb->stride*y));

    surface_t* tex = &(c->state.texture[0].surface);
    const int32_t u = (c->state.texture[0].shade.is0>>16) + x;
    const int32_t v = (c->state.texture[0].shade.it0>>16) + y;
    uint32_t *src = reinterpret_cast<uint32_t*>(tex->data)+(u+(tex->stride*v));
    int sR, sG, sB;
    uint32_t s, d;

    if (ct==1 || uintptr_t(dst)&2) {
last_one:
        s = GGL_RGBA_TO_HOST( *src++ );
        *dst++ = convertAbgr8888ToRgb565(s);
        ct--;
    }

    while (ct >= 2) {
#if BYTE_ORDER == BIG_ENDIAN
        s = GGL_RGBA_TO_HOST( *src++ );
        d = convertAbgr8888ToRgb565_hi16(s);

        s = GGL_RGBA_TO_HOST( *src++ );
        d |= convertAbgr8888ToRgb565(s);
#else
        s = GGL_RGBA_TO_HOST( *src++ );
        d = convertAbgr8888ToRgb565(s);

        s = GGL_RGBA_TO_HOST( *src++ );
        d |= convertAbgr8888ToRgb565(s) << 16;
#endif
        *dst32++ = d;
        ct -= 2;
    }

    if (ct > 0) {
        goto last_one;
    }
}

void scanline_t32cb16blend(context_t* c)
{
#if ((ANDROID_CODEGEN >= ANDROID_CODEGEN_ASM) && (defined(__arm__) || (defined(__mips__) && !defined(__LP64__) && __mips_isa_rev < 6) || defined(__aarch64__)))
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    uint16_t* dst = reinterpret_cast<uint16_t*>(cb->data) + (x+(cb->stride*y));

    surface_t* tex = &(c->state.texture[0].surface);
    const int32_t u = (c->state.texture[0].shade.is0>>16) + x;
    const int32_t v = (c->state.texture[0].shade.it0>>16) + y;
    uint32_t *src = reinterpret_cast<uint32_t*>(tex->data)+(u+(tex->stride*v));

#ifdef __arm__
    scanline_t32cb16blend_arm(dst, src, ct);
#elif defined(__aarch64__)
    scanline_t32cb16blend_arm64(dst, src, ct);
#elif defined(__mips__)
    scanline_t32cb16blend_mips(dst, src, ct);
#endif
#else
    dst_iterator16  di(c);
    horz_iterator32 hi(c);
    blender_32to16  bl(c);
    while (di.count--) {
        uint32_t s = hi.get_pixel32();
        bl.write(s, di.dst);
        di.dst++;
    }
#endif
}

void scanline_t32cb16blend_srca(context_t* c)
{
    dst_iterator16  di(c);
    horz_iterator32 hi(c);
    blender_32to16_srcA blender(c);

    while (di.count--) {
        uint32_t s = hi.get_pixel32();
        blender.write(s, di.dst);
        di.dst++;
    }
}

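// 16-bit texture modulated by the iterated alpha and written into a 16-bit
// destination. The alpha is read from the y-iterated alpha channel: a value
// of 0 leaves the destination untouched (early return), and 255 degenerates
// to the plain clamped copy, scanline_t16cb16_clamp().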
void scanline_t16cb16blend_clamp_mod(context_t* c)
{
    const int a = c->iterators.ydady >> (GGL_COLOR_BITS-8);
    if (a == 0) {
        return;
    }

    if (a == 255) {
        scanline_t16cb16_clamp(c);
        return;
    }

    dst_iterator16  di(c);
    blender_16to16_modulate blender(c);
    clamp_iterator ci(c);

    while (di.count--) {
        uint16_t s = ci.get_pixel16();
        blender.write(s, di.dst);
        di.dst++;
    }
}

void scanline_memcpy(context_t* c)
{
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    const GGLFormat* fp = &(c->formats[cb->format]);
    uint8_t* dst = reinterpret_cast<uint8_t*>(cb->data) +
            (x + (cb->stride * y)) * fp->size;

    surface_t* tex = &(c->state.texture[0].surface);
    const int32_t u = (c->state.texture[0].shade.is0>>16) + x;
    const int32_t v = (c->state.texture[0].shade.it0>>16) + y;
    uint8_t *src = reinterpret_cast<uint8_t*>(tex->data) +
            (u + (tex->stride * v)) * fp->size;

    const size_t size = ct * fp->size;
    memcpy(dst, src, size);
}

void scanline_memset8(context_t* c)
{
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    uint8_t* dst = reinterpret_cast<uint8_t*>(cb->data) + (x+(cb->stride*y));
    uint32_t packed = c->packed;
    memset(dst, packed, ct);
}

void scanline_memset16(context_t* c)
{
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    uint16_t* dst = reinterpret_cast<uint16_t*>(cb->data) + (x+(cb->stride*y));
    uint32_t packed = c->packed;
    android_memset16(dst, packed, ct*2);
}

void scanline_memset32(context_t* c)
{
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    uint32_t* dst = reinterpret_cast<uint32_t*>(cb->data) + (x+(cb->stride*y));
    uint32_t packed = GGL_HOST_TO_RGBA(c->packed);
    android_memset32(dst, packed, ct*4);
}

void scanline_clear(context_t* c)
{
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    const GGLFormat* fp = &(c->formats[cb->format]);
    uint8_t* dst = reinterpret_cast<uint8_t*>(cb->data) +
            (x + (cb->stride * y)) * fp->size;
    const size_t size = ct * fp->size;
    memset(dst, 0, size);
}

void scanline_set(context_t* c)
{
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    const GGLFormat* fp = &(c->formats[cb->format]);
    uint8_t* dst = reinterpret_cast<uint8_t*>(cb->data) +
            (x + (cb->stride * y)) * fp->size;
    const size_t size = ct * fp->size;
    memset(dst, 0xFF, size);
}

void scanline_noop(context_t* /*c*/)
{
}

void rect_generic(context_t* c, size_t yc)
{
    do {
        c->scanline(c);
        c->step_y(c);
    } while (--yc);
}

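// Rectangle blit by straight copy. When both surfaces are contiguous (equal
// strides and the span covers a full row) the whole rectangle is copied with
// one memcpy; otherwise it falls back to one memcpy per scanline, stepping
// each pointer by its surface's bytes-per-row.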
void rect_memcpy(context_t* c, size_t yc)
{
    int32_t x = c->iterators.xl;
    size_t ct = c->iterators.xr - x;
    int32_t y = c->iterators.y;
    surface_t* cb = &(c->state.buffers.color);
    const GGLFormat* fp = &(c->formats[cb->format]);
    uint8_t* dst = reinterpret_cast<uint8_t*>(cb->data) +
            (x + (cb->stride * y)) * fp->size;

    surface_t* tex = &(c->state.texture[0].surface);
    const int32_t u = (c->state.texture[0].shade.is0>>16) + x;
    const int32_t v = (c->state.texture[0].shade.it0>>16) + y;
    uint8_t *src = reinterpret_cast<uint8_t*>(tex->data) +
            (u + (tex->stride * v)) * fp->size;

    if (cb->stride == tex->stride && ct == size_t(cb->stride)) {
        memcpy(dst, src, ct * fp->size * yc);
    } else {
        const size_t size = ct * fp->size;
        const size_t dbpr = cb->stride * fp->size;
        const size_t sbpr = tex->stride * fp->size;
        do {
            memcpy(dst, src, size);
            dst += dbpr;
            src += sbpr;
        } while (--yc);
    }
}
// ----------------------------------------------------------------------------
}; // namespace android