;
; jidctint.asm - accurate integer IDCT (SSE2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
;
; Based on
; x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; This file contains a slow-but-accurate integer implementation of the
; inverse DCT (Discrete Cosine Transform). The following code is based
; directly on the IJG's original jidctint.c; see jidctint.c for
; more details.
;
; [TAB8]

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------

%define CONST_BITS      13
%define PASS1_BITS      2

%define DESCALE_P1      (CONST_BITS-PASS1_BITS)
%define DESCALE_P2      (CONST_BITS+PASS1_BITS+3)
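; With CONST_BITS = 13 and PASS1_BITS = 2 these work out to DESCALE_P1 = 11
; and DESCALE_P2 = 18: pass 1 keeps its results scaled up by PASS1_BITS
; extra fractional bits, and pass 2 removes them again together with the
; factor of 8 (3 bits) inherent in the two-pass 8x8 IDCT, mirroring the
; DESCALE() steps in jidctint.c.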

%if CONST_BITS == 13
F_0_298 equ      2446           ; FIX(0.298631336)
F_0_390 equ      3196           ; FIX(0.390180644)
F_0_541 equ      4433           ; FIX(0.541196100)
F_0_765 equ      6270           ; FIX(0.765366865)
F_0_899 equ      7373           ; FIX(0.899976223)
F_1_175 equ      9633           ; FIX(1.175875602)
F_1_501 equ     12299           ; FIX(1.501321110)
F_1_847 equ     15137           ; FIX(1.847759065)
F_1_961 equ     16069           ; FIX(1.961570560)
F_2_053 equ     16819           ; FIX(2.053119869)
F_2_562 equ     20995           ; FIX(2.562915447)
F_3_072 equ     25172           ; FIX(3.072711026)
%else
; NASM cannot do compile-time arithmetic on floating-point constants.
%define DESCALE(x,n)  (((x)+(1<<((n)-1)))>>(n))
F_0_298 equ     DESCALE( 320652955,30-CONST_BITS)       ; FIX(0.298631336)
F_0_390 equ     DESCALE( 418953276,30-CONST_BITS)       ; FIX(0.390180644)
F_0_541 equ     DESCALE( 581104887,30-CONST_BITS)       ; FIX(0.541196100)
F_0_765 equ     DESCALE( 821806413,30-CONST_BITS)       ; FIX(0.765366865)
F_0_899 equ     DESCALE( 966342111,30-CONST_BITS)       ; FIX(0.899976223)
F_1_175 equ     DESCALE(1262586813,30-CONST_BITS)       ; FIX(1.175875602)
F_1_501 equ     DESCALE(1612031267,30-CONST_BITS)       ; FIX(1.501321110)
F_1_847 equ     DESCALE(1984016188,30-CONST_BITS)       ; FIX(1.847759065)
F_1_961 equ     DESCALE(2106220350,30-CONST_BITS)       ; FIX(1.961570560)
F_2_053 equ     DESCALE(2204520673,30-CONST_BITS)       ; FIX(2.053119869)
F_2_562 equ     DESCALE(2751909506,30-CONST_BITS)       ; FIX(2.562915447)
F_3_072 equ     DESCALE(3299298341,30-CONST_BITS)       ; FIX(3.072711026)
%endif
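; FIX(x) denotes x scaled to CONST_BITS fractional bits, i.e.
; round(x * 2^CONST_BITS).  For example, with CONST_BITS = 13:
;   FIX(0.541196100) = round(0.541196100 * 8192) = 4433
; and the %else branch obtains the same value from a 30-bit constant:
;   DESCALE(581104887, 30-13) = (581104887 + (1<<16)) >> 17 = 4433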

; --------------------------------------------------------------------------
        SECTION SEG_CONST

        alignz  16
        global  EXTN(jconst_idct_islow_sse2)

EXTN(jconst_idct_islow_sse2):

PW_F130_F054    times 4 dw  (F_0_541+F_0_765), F_0_541
PW_F054_MF130   times 4 dw  F_0_541, (F_0_541-F_1_847)
PW_MF078_F117   times 4 dw  (F_1_175-F_1_961), F_1_175
PW_F117_F078    times 4 dw  F_1_175, (F_1_175-F_0_390)
PW_MF060_MF089  times 4 dw  (F_0_298-F_0_899),-F_0_899
PW_MF089_F060   times 4 dw -F_0_899, (F_1_501-F_0_899)
PW_MF050_MF256  times 4 dw  (F_2_053-F_2_562),-F_2_562
PW_MF256_F050   times 4 dw -F_2_562, (F_3_072-F_2_562)
PD_DESCALE_P1   times 4 dd  1 << (DESCALE_P1-1)
PD_DESCALE_P2   times 4 dd  1 << (DESCALE_P2-1)
PB_CENTERJSAMP  times 16 db CENTERJSAMPLE
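; The PW_* tables hold (even, odd) constant pairs laid out for pmaddwd:
; after two inputs have been interleaved word-wise (punpcklwd/punpckhwd),
; each pmaddwd produces dwords of the form  a*const_even + b*const_odd.
; For instance, interleaving z2 and z3 and multiplying by PW_F130_F054
; yields  z2*(F_0_541+F_0_765) + z3*F_0_541, i.e. the tmp3 rotation used
; in the even part below.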

        alignz  16

; --------------------------------------------------------------------------
        SECTION SEG_TEXT
        BITS    32
;
; Perform dequantization and inverse DCT on one block of coefficients.
;
; GLOBAL(void)
; jsimd_idct_islow_sse2 (void * dct_table, JCOEFPTR coef_block,
;                        JSAMPARRAY output_buf, JDIMENSION output_col)
;
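; Assumed usage (for illustration only; the actual caller lives in the
; jsimd dispatch layer):
;
;   jsimd_idct_islow_sse2(compptr->dct_table, coef_block,
;                         output_buf, output_col);
;
; where output_buf[i] + output_col addresses the first of the eight output
; samples written for row i of the 8x8 block.
;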

%define dct_table(b)    (b)+8           ; jpeg_component_info * compptr
%define coef_block(b)   (b)+12          ; JCOEFPTR coef_block
%define output_buf(b)   (b)+16          ; JSAMPARRAY output_buf
%define output_col(b)   (b)+20          ; JDIMENSION output_col

%define original_ebp    ebp+0
%define wk(i)           ebp-(WK_NUM-(i))*SIZEOF_XMMWORD ; xmmword wk[WK_NUM]
%define WK_NUM          12
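; With WK_NUM = 12 and SIZEOF_XMMWORD = 16, wk() spans 192 bytes of
; 16-byte-aligned scratch below the aligned ebp set up in the prologue:
; wk(0) sits at ebp-192 and wk(11) at ebp-16, so "lea esp, [wk(0)]"
; reserves the whole work array in one step.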

        align   16
        global  EXTN(jsimd_idct_islow_sse2)

EXTN(jsimd_idct_islow_sse2):
        push    ebp
        mov     eax,esp                         ; eax = original ebp
        sub     esp, byte 4
        and     esp, byte (-SIZEOF_XMMWORD)     ; align to 128 bits
        mov     [esp],eax
        mov     ebp,esp                         ; ebp = aligned ebp
        lea     esp, [wk(0)]
        pushpic ebx
;       push    ecx             ; unused
;       push    edx             ; need not be preserved
        push    esi
        push    edi

        get_GOT ebx             ; get GOT address

        ; ---- Pass 1: process columns from input.

;       mov     eax, [original_ebp]
        mov     edx, POINTER [dct_table(eax)]           ; quantptr
        mov     esi, JCOEFPTR [coef_block(eax)]         ; inptr

%ifndef NO_ZERO_COLUMN_TEST_ISLOW_SSE2
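        ; Zero-AC shortcut: first a cheap test of one dword each from
        ; rows 1 and 2; only if those are zero is the full OR/packsswb
        ; test over rows 1-7 worth doing.  If every AC term in the block
        ; is zero, each output column is just the scaled DC value,
        ; handled in the "AC terms all zero" path below.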
        mov     eax, DWORD [DWBLOCK(1,0,esi,SIZEOF_JCOEF)]
        or      eax, DWORD [DWBLOCK(2,0,esi,SIZEOF_JCOEF)]
        jnz     near .columnDCT

        movdqa  xmm0, XMMWORD [XMMBLOCK(1,0,esi,SIZEOF_JCOEF)]
        movdqa  xmm1, XMMWORD [XMMBLOCK(2,0,esi,SIZEOF_JCOEF)]
        por     xmm0, XMMWORD [XMMBLOCK(3,0,esi,SIZEOF_JCOEF)]
        por     xmm1, XMMWORD [XMMBLOCK(4,0,esi,SIZEOF_JCOEF)]
        por     xmm0, XMMWORD [XMMBLOCK(5,0,esi,SIZEOF_JCOEF)]
        por     xmm1, XMMWORD [XMMBLOCK(6,0,esi,SIZEOF_JCOEF)]
        por     xmm0, XMMWORD [XMMBLOCK(7,0,esi,SIZEOF_JCOEF)]
        por     xmm1,xmm0
        packsswb xmm1,xmm1
        packsswb xmm1,xmm1
        movd    eax,xmm1
        test    eax,eax
        jnz     short .columnDCT

        ; -- AC terms all zero
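        ;
        ; In this case the column pass reduces to the dcval shortcut of
        ; jidctint.c: each column's output is simply
        ;   DEQUANTIZE(in0) << PASS1_BITS
        ; replicated to all eight rows, which the pmullw/psllw pair and
        ; the pshufd broadcasts below implement for all eight columns at
        ; once.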

        movdqa  xmm5, XMMWORD [XMMBLOCK(0,0,esi,SIZEOF_JCOEF)]
        pmullw  xmm5, XMMWORD [XMMBLOCK(0,0,edx,SIZEOF_ISLOW_MULT_TYPE)]

        psllw   xmm5,PASS1_BITS

        movdqa    xmm4,xmm5             ; xmm5=in0=(00 01 02 03 04 05 06 07)
        punpcklwd xmm5,xmm5             ; xmm5=(00 00 01 01 02 02 03 03)
        punpckhwd xmm4,xmm4             ; xmm4=(04 04 05 05 06 06 07 07)

        pshufd  xmm7,xmm5,0x00          ; xmm7=col0=(00 00 00 00 00 00 00 00)
        pshufd  xmm6,xmm5,0x55          ; xmm6=col1=(01 01 01 01 01 01 01 01)
        pshufd  xmm1,xmm5,0xAA          ; xmm1=col2=(02 02 02 02 02 02 02 02)
        pshufd  xmm5,xmm5,0xFF          ; xmm5=col3=(03 03 03 03 03 03 03 03)
        pshufd  xmm0,xmm4,0x00          ; xmm0=col4=(04 04 04 04 04 04 04 04)
        pshufd  xmm3,xmm4,0x55          ; xmm3=col5=(05 05 05 05 05 05 05 05)
        pshufd  xmm2,xmm4,0xAA          ; xmm2=col6=(06 06 06 06 06 06 06 06)
        pshufd  xmm4,xmm4,0xFF          ; xmm4=col7=(07 07 07 07 07 07 07 07)

        movdqa  XMMWORD [wk(8)], xmm6   ; wk(8)=col1
        movdqa  XMMWORD [wk(9)], xmm5   ; wk(9)=col3
        movdqa  XMMWORD [wk(10)], xmm3  ; wk(10)=col5
        movdqa  XMMWORD [wk(11)], xmm4  ; wk(11)=col7
        jmp     near .column_end
        alignx  16,7
%endif
.columnDCT:

        ; -- Even part

        movdqa  xmm0, XMMWORD [XMMBLOCK(0,0,esi,SIZEOF_JCOEF)]
        movdqa  xmm1, XMMWORD [XMMBLOCK(2,0,esi,SIZEOF_JCOEF)]
        pmullw  xmm0, XMMWORD [XMMBLOCK(0,0,edx,SIZEOF_ISLOW_MULT_TYPE)]
        pmullw  xmm1, XMMWORD [XMMBLOCK(2,0,edx,SIZEOF_ISLOW_MULT_TYPE)]
        movdqa  xmm2, XMMWORD [XMMBLOCK(4,0,esi,SIZEOF_JCOEF)]
        movdqa  xmm3, XMMWORD [XMMBLOCK(6,0,esi,SIZEOF_JCOEF)]
        pmullw  xmm2, XMMWORD [XMMBLOCK(4,0,edx,SIZEOF_ISLOW_MULT_TYPE)]
        pmullw  xmm3, XMMWORD [XMMBLOCK(6,0,edx,SIZEOF_ISLOW_MULT_TYPE)]

        ; (Original)
        ; z1 = (z2 + z3) * 0.541196100;
        ; tmp2 = z1 + z3 * -1.847759065;
        ; tmp3 = z1 + z2 * 0.765366865;
        ;
        ; (This implementation)
        ; tmp2 = z2 * 0.541196100 + z3 * (0.541196100 - 1.847759065);
        ; tmp3 = z2 * (0.541196100 + 0.765366865) + z3 * 0.541196100;
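        ;
        ; The rewrite just distributes z1:
        ;   tmp2 = (z2 + z3)*0.541196100 + z3*(-1.847759065)
        ;        = z2*0.541196100 + z3*(0.541196100 - 1.847759065)
        ;   tmp3 = (z2 + z3)*0.541196100 + z2*0.765366865
        ;        = z2*(0.541196100 + 0.765366865) + z3*0.541196100
        ; which is exactly the form pmaddwd computes with PW_F054_MF130
        ; and PW_F130_F054.  The same distribution trick is used for the
        ; other rotation comments below.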

        movdqa    xmm4,xmm1             ; xmm1=in2=z2
        movdqa    xmm5,xmm1
        punpcklwd xmm4,xmm3             ; xmm3=in6=z3
        punpckhwd xmm5,xmm3
        movdqa    xmm1,xmm4
        movdqa    xmm3,xmm5
        pmaddwd   xmm4,[GOTOFF(ebx,PW_F130_F054)]       ; xmm4=tmp3L
        pmaddwd   xmm5,[GOTOFF(ebx,PW_F130_F054)]       ; xmm5=tmp3H
        pmaddwd   xmm1,[GOTOFF(ebx,PW_F054_MF130)]      ; xmm1=tmp2L
        pmaddwd   xmm3,[GOTOFF(ebx,PW_F054_MF130)]      ; xmm3=tmp2H

        movdqa    xmm6,xmm0
        paddw     xmm0,xmm2             ; xmm0=in0+in4
        psubw     xmm6,xmm2             ; xmm6=in0-in4

        pxor      xmm7,xmm7
        pxor      xmm2,xmm2
        punpcklwd xmm7,xmm0             ; xmm7=tmp0L
        punpckhwd xmm2,xmm0             ; xmm2=tmp0H
        psrad     xmm7,(16-CONST_BITS)  ; psrad xmm7,16 & pslld xmm7,CONST_BITS
        psrad     xmm2,(16-CONST_BITS)  ; psrad xmm2,16 & pslld xmm2,CONST_BITS
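        ;
        ; punpcklwd/punpckhwd with a zeroed register place each 16-bit
        ; value in the upper half of a dword (value << 16); the arithmetic
        ; shift right by (16-CONST_BITS) = 3 then leaves the sign-extended
        ; value scaled by 2^CONST_BITS, matching the "<< CONST_BITS"
        ; applied to the even-part sums and differences in jidctint.c.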

        movdqa  xmm0,xmm7
        paddd   xmm7,xmm4               ; xmm7=tmp10L
        psubd   xmm0,xmm4               ; xmm0=tmp13L
        movdqa  xmm4,xmm2
        paddd   xmm2,xmm5               ; xmm2=tmp10H
        psubd   xmm4,xmm5               ; xmm4=tmp13H

        movdqa  XMMWORD [wk(0)], xmm7   ; wk(0)=tmp10L
        movdqa  XMMWORD [wk(1)], xmm2   ; wk(1)=tmp10H
        movdqa  XMMWORD [wk(2)], xmm0   ; wk(2)=tmp13L
        movdqa  XMMWORD [wk(3)], xmm4   ; wk(3)=tmp13H

        pxor      xmm5,xmm5
        pxor      xmm7,xmm7
        punpcklwd xmm5,xmm6             ; xmm5=tmp1L
        punpckhwd xmm7,xmm6             ; xmm7=tmp1H
        psrad     xmm5,(16-CONST_BITS)  ; psrad xmm5,16 & pslld xmm5,CONST_BITS
        psrad     xmm7,(16-CONST_BITS)  ; psrad xmm7,16 & pslld xmm7,CONST_BITS

        movdqa  xmm2,xmm5
        paddd   xmm5,xmm1               ; xmm5=tmp11L
        psubd   xmm2,xmm1               ; xmm2=tmp12L
        movdqa  xmm0,xmm7
        paddd   xmm7,xmm3               ; xmm7=tmp11H
        psubd   xmm0,xmm3               ; xmm0=tmp12H

        movdqa  XMMWORD [wk(4)], xmm5   ; wk(4)=tmp11L
        movdqa  XMMWORD [wk(5)], xmm7   ; wk(5)=tmp11H
        movdqa  XMMWORD [wk(6)], xmm2   ; wk(6)=tmp12L
        movdqa  XMMWORD [wk(7)], xmm0   ; wk(7)=tmp12H

        ; -- Odd part

        movdqa  xmm4, XMMWORD [XMMBLOCK(1,0,esi,SIZEOF_JCOEF)]
        movdqa  xmm6, XMMWORD [XMMBLOCK(3,0,esi,SIZEOF_JCOEF)]
        pmullw  xmm4, XMMWORD [XMMBLOCK(1,0,edx,SIZEOF_ISLOW_MULT_TYPE)]
        pmullw  xmm6, XMMWORD [XMMBLOCK(3,0,edx,SIZEOF_ISLOW_MULT_TYPE)]
        movdqa  xmm1, XMMWORD [XMMBLOCK(5,0,esi,SIZEOF_JCOEF)]
        movdqa  xmm3, XMMWORD [XMMBLOCK(7,0,esi,SIZEOF_JCOEF)]
        pmullw  xmm1, XMMWORD [XMMBLOCK(5,0,edx,SIZEOF_ISLOW_MULT_TYPE)]
        pmullw  xmm3, XMMWORD [XMMBLOCK(7,0,edx,SIZEOF_ISLOW_MULT_TYPE)]

        movdqa  xmm5,xmm6
        movdqa  xmm7,xmm4
        paddw   xmm5,xmm3               ; xmm5=z3
        paddw   xmm7,xmm1               ; xmm7=z4

        ; (Original)
        ; z5 = (z3 + z4) * 1.175875602;
        ; z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644;
        ; z3 += z5;  z4 += z5;
        ;
        ; (This implementation)
        ; z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602;
        ; z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644);
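        ;
        ; Note that the rewritten z4 still refers to the *original* z3;
        ; both products are formed in parallel by pmaddwd from the same
        ; interleaved (z3, z4) input, so there is no ordering hazard.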

        movdqa    xmm2,xmm5
        movdqa    xmm0,xmm5
        punpcklwd xmm2,xmm7
        punpckhwd xmm0,xmm7
        movdqa    xmm5,xmm2
        movdqa    xmm7,xmm0
        pmaddwd   xmm2,[GOTOFF(ebx,PW_MF078_F117)]      ; xmm2=z3L
        pmaddwd   xmm0,[GOTOFF(ebx,PW_MF078_F117)]      ; xmm0=z3H
        pmaddwd   xmm5,[GOTOFF(ebx,PW_F117_F078)]       ; xmm5=z4L
        pmaddwd   xmm7,[GOTOFF(ebx,PW_F117_F078)]       ; xmm7=z4H

        movdqa  XMMWORD [wk(10)], xmm2  ; wk(10)=z3L
        movdqa  XMMWORD [wk(11)], xmm0  ; wk(11)=z3H

        ; (Original)
        ; z1 = tmp0 + tmp3;  z2 = tmp1 + tmp2;
        ; tmp0 = tmp0 * 0.298631336;  tmp1 = tmp1 * 2.053119869;
        ; tmp2 = tmp2 * 3.072711026;  tmp3 = tmp3 * 1.501321110;
        ; z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447;
        ; tmp0 += z1 + z3;  tmp1 += z2 + z4;
        ; tmp2 += z2 + z3;  tmp3 += z1 + z4;
        ;
        ; (This implementation)
        ; tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223;
        ; tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447;
        ; tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447);
        ; tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223);
        ; tmp0 += z3;  tmp1 += z4;
        ; tmp2 += z3;  tmp3 += z4;
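        ;
        ; Again this just folds z1 and z2 into the multiplier pairs, e.g.
        ;   tmp0 = tmp0*0.298631336 - (tmp0 + tmp3)*0.899976223 + z3
        ;        = tmp0*(0.298631336 - 0.899976223) + tmp3*(-0.899976223) + z3
        ; which is one pmaddwd with PW_MF060_MF089 followed by adding z3.
        ; As before, tmp2 and tmp3 use the original tmp1 and tmp0 values,
        ; which is safe because all four pmaddwd results are computed from
        ; the same interleaved inputs.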

        movdqa    xmm2,xmm3
        movdqa    xmm0,xmm3
        punpcklwd xmm2,xmm4
        punpckhwd xmm0,xmm4
        movdqa    xmm3,xmm2
        movdqa    xmm4,xmm0
        pmaddwd   xmm2,[GOTOFF(ebx,PW_MF060_MF089)]     ; xmm2=tmp0L
        pmaddwd   xmm0,[GOTOFF(ebx,PW_MF060_MF089)]     ; xmm0=tmp0H
        pmaddwd   xmm3,[GOTOFF(ebx,PW_MF089_F060)]      ; xmm3=tmp3L
        pmaddwd   xmm4,[GOTOFF(ebx,PW_MF089_F060)]      ; xmm4=tmp3H

        paddd   xmm2, XMMWORD [wk(10)]  ; xmm2=tmp0L
        paddd   xmm0, XMMWORD [wk(11)]  ; xmm0=tmp0H
        paddd   xmm3,xmm5               ; xmm3=tmp3L
        paddd   xmm4,xmm7               ; xmm4=tmp3H

        movdqa  XMMWORD [wk(8)], xmm2   ; wk(8)=tmp0L
        movdqa  XMMWORD [wk(9)], xmm0   ; wk(9)=tmp0H

        movdqa    xmm2,xmm1
        movdqa    xmm0,xmm1
        punpcklwd xmm2,xmm6
        punpckhwd xmm0,xmm6
        movdqa    xmm1,xmm2
        movdqa    xmm6,xmm0
        pmaddwd   xmm2,[GOTOFF(ebx,PW_MF050_MF256)]     ; xmm2=tmp1L
        pmaddwd   xmm0,[GOTOFF(ebx,PW_MF050_MF256)]     ; xmm0=tmp1H
        pmaddwd   xmm1,[GOTOFF(ebx,PW_MF256_F050)]      ; xmm1=tmp2L
        pmaddwd   xmm6,[GOTOFF(ebx,PW_MF256_F050)]      ; xmm6=tmp2H

        paddd   xmm2,xmm5               ; xmm2=tmp1L
        paddd   xmm0,xmm7               ; xmm0=tmp1H
        paddd   xmm1, XMMWORD [wk(10)]  ; xmm1=tmp2L
        paddd   xmm6, XMMWORD [wk(11)]  ; xmm6=tmp2H

        movdqa  XMMWORD [wk(10)], xmm2  ; wk(10)=tmp1L
        movdqa  XMMWORD [wk(11)], xmm0  ; wk(11)=tmp1H

        ; -- Final output stage

        movdqa  xmm5, XMMWORD [wk(0)]   ; xmm5=tmp10L
        movdqa  xmm7, XMMWORD [wk(1)]   ; xmm7=tmp10H

        movdqa  xmm2,xmm5
        movdqa  xmm0,xmm7
        paddd   xmm5,xmm3               ; xmm5=data0L
        paddd   xmm7,xmm4               ; xmm7=data0H
        psubd   xmm2,xmm3               ; xmm2=data7L
        psubd   xmm0,xmm4               ; xmm0=data7H

        movdqa  xmm3,[GOTOFF(ebx,PD_DESCALE_P1)]        ; xmm3=[PD_DESCALE_P1]

        paddd   xmm5,xmm3
        paddd   xmm7,xmm3
        psrad   xmm5,DESCALE_P1
        psrad   xmm7,DESCALE_P1
        paddd   xmm2,xmm3
        paddd   xmm0,xmm3
        psrad   xmm2,DESCALE_P1
        psrad   xmm0,DESCALE_P1

        packssdw  xmm5,xmm7             ; xmm5=data0=(00 01 02 03 04 05 06 07)
        packssdw  xmm2,xmm0             ; xmm2=data7=(70 71 72 73 74 75 76 77)

        movdqa  xmm4, XMMWORD [wk(4)]   ; xmm4=tmp11L
        movdqa  xmm3, XMMWORD [wk(5)]   ; xmm3=tmp11H

        movdqa  xmm7,xmm4
        movdqa  xmm0,xmm3
        paddd   xmm4,xmm1               ; xmm4=data1L
        paddd   xmm3,xmm6               ; xmm3=data1H
        psubd   xmm7,xmm1               ; xmm7=data6L
        psubd   xmm0,xmm6               ; xmm0=data6H

        movdqa  xmm1,[GOTOFF(ebx,PD_DESCALE_P1)]        ; xmm1=[PD_DESCALE_P1]

        paddd   xmm4,xmm1
        paddd   xmm3,xmm1
        psrad   xmm4,DESCALE_P1
        psrad   xmm3,DESCALE_P1
        paddd   xmm7,xmm1
        paddd   xmm0,xmm1
        psrad   xmm7,DESCALE_P1
        psrad   xmm0,DESCALE_P1

        packssdw  xmm4,xmm3             ; xmm4=data1=(10 11 12 13 14 15 16 17)
        packssdw  xmm7,xmm0             ; xmm7=data6=(60 61 62 63 64 65 66 67)

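        ; The 8x8 block of 16-bit results is now transposed in three unpack
        ; phases (words, then dwords, then qwords), with wk() used to spill
        ; values that do not fit in registers; the transposed columns
        ; col0..col7 become the row inputs for pass 2.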
        movdqa    xmm6,xmm5             ; transpose coefficients(phase 1)
        punpcklwd xmm5,xmm4             ; xmm5=(00 10 01 11 02 12 03 13)
        punpckhwd xmm6,xmm4             ; xmm6=(04 14 05 15 06 16 07 17)
        movdqa    xmm1,xmm7             ; transpose coefficients(phase 1)
        punpcklwd xmm7,xmm2             ; xmm7=(60 70 61 71 62 72 63 73)
        punpckhwd xmm1,xmm2             ; xmm1=(64 74 65 75 66 76 67 77)

        movdqa  xmm3, XMMWORD [wk(6)]   ; xmm3=tmp12L
        movdqa  xmm0, XMMWORD [wk(7)]   ; xmm0=tmp12H
        movdqa  xmm4, XMMWORD [wk(10)]  ; xmm4=tmp1L
        movdqa  xmm2, XMMWORD [wk(11)]  ; xmm2=tmp1H

        movdqa  XMMWORD [wk(0)], xmm5   ; wk(0)=(00 10 01 11 02 12 03 13)
        movdqa  XMMWORD [wk(1)], xmm6   ; wk(1)=(04 14 05 15 06 16 07 17)
        movdqa  XMMWORD [wk(4)], xmm7   ; wk(4)=(60 70 61 71 62 72 63 73)
        movdqa  XMMWORD [wk(5)], xmm1   ; wk(5)=(64 74 65 75 66 76 67 77)

        movdqa  xmm5,xmm3
        movdqa  xmm6,xmm0
        paddd   xmm3,xmm4               ; xmm3=data2L
        paddd   xmm0,xmm2               ; xmm0=data2H
        psubd   xmm5,xmm4               ; xmm5=data5L
        psubd   xmm6,xmm2               ; xmm6=data5H

        movdqa  xmm7,[GOTOFF(ebx,PD_DESCALE_P1)]        ; xmm7=[PD_DESCALE_P1]

        paddd   xmm3,xmm7
        paddd   xmm0,xmm7
        psrad   xmm3,DESCALE_P1
        psrad   xmm0,DESCALE_P1
        paddd   xmm5,xmm7
        paddd   xmm6,xmm7
        psrad   xmm5,DESCALE_P1
        psrad   xmm6,DESCALE_P1

        packssdw  xmm3,xmm0             ; xmm3=data2=(20 21 22 23 24 25 26 27)
        packssdw  xmm5,xmm6             ; xmm5=data5=(50 51 52 53 54 55 56 57)

        movdqa  xmm1, XMMWORD [wk(2)]   ; xmm1=tmp13L
        movdqa  xmm4, XMMWORD [wk(3)]   ; xmm4=tmp13H
        movdqa  xmm2, XMMWORD [wk(8)]   ; xmm2=tmp0L
        movdqa  xmm7, XMMWORD [wk(9)]   ; xmm7=tmp0H

        movdqa  xmm0,xmm1
        movdqa  xmm6,xmm4
        paddd   xmm1,xmm2               ; xmm1=data3L
        paddd   xmm4,xmm7               ; xmm4=data3H
        psubd   xmm0,xmm2               ; xmm0=data4L
        psubd   xmm6,xmm7               ; xmm6=data4H

        movdqa  xmm2,[GOTOFF(ebx,PD_DESCALE_P1)]        ; xmm2=[PD_DESCALE_P1]

        paddd   xmm1,xmm2
        paddd   xmm4,xmm2
        psrad   xmm1,DESCALE_P1
        psrad   xmm4,DESCALE_P1
        paddd   xmm0,xmm2
        paddd   xmm6,xmm2
        psrad   xmm0,DESCALE_P1
        psrad   xmm6,DESCALE_P1

        packssdw  xmm1,xmm4             ; xmm1=data3=(30 31 32 33 34 35 36 37)
        packssdw  xmm0,xmm6             ; xmm0=data4=(40 41 42 43 44 45 46 47)

        movdqa  xmm7, XMMWORD [wk(0)]   ; xmm7=(00 10 01 11 02 12 03 13)
        movdqa  xmm2, XMMWORD [wk(1)]   ; xmm2=(04 14 05 15 06 16 07 17)

        movdqa    xmm4,xmm3             ; transpose coefficients(phase 1)
        punpcklwd xmm3,xmm1             ; xmm3=(20 30 21 31 22 32 23 33)
        punpckhwd xmm4,xmm1             ; xmm4=(24 34 25 35 26 36 27 37)
        movdqa    xmm6,xmm0             ; transpose coefficients(phase 1)
        punpcklwd xmm0,xmm5             ; xmm0=(40 50 41 51 42 52 43 53)
        punpckhwd xmm6,xmm5             ; xmm6=(44 54 45 55 46 56 47 57)

        movdqa    xmm1,xmm7             ; transpose coefficients(phase 2)
        punpckldq xmm7,xmm3             ; xmm7=(00 10 20 30 01 11 21 31)
        punpckhdq xmm1,xmm3             ; xmm1=(02 12 22 32 03 13 23 33)
        movdqa    xmm5,xmm2             ; transpose coefficients(phase 2)
        punpckldq xmm2,xmm4             ; xmm2=(04 14 24 34 05 15 25 35)
        punpckhdq xmm5,xmm4             ; xmm5=(06 16 26 36 07 17 27 37)

        movdqa  xmm3, XMMWORD [wk(4)]   ; xmm3=(60 70 61 71 62 72 63 73)
        movdqa  xmm4, XMMWORD [wk(5)]   ; xmm4=(64 74 65 75 66 76 67 77)

        movdqa  XMMWORD [wk(6)], xmm2   ; wk(6)=(04 14 24 34 05 15 25 35)
        movdqa  XMMWORD [wk(7)], xmm5   ; wk(7)=(06 16 26 36 07 17 27 37)

        movdqa    xmm2,xmm0             ; transpose coefficients(phase 2)
        punpckldq xmm0,xmm3             ; xmm0=(40 50 60 70 41 51 61 71)
        punpckhdq xmm2,xmm3             ; xmm2=(42 52 62 72 43 53 63 73)
        movdqa    xmm5,xmm6             ; transpose coefficients(phase 2)
        punpckldq xmm6,xmm4             ; xmm6=(44 54 64 74 45 55 65 75)
        punpckhdq xmm5,xmm4             ; xmm5=(46 56 66 76 47 57 67 77)

        movdqa     xmm3,xmm7            ; transpose coefficients(phase 3)
        punpcklqdq xmm7,xmm0            ; xmm7=col0=(00 10 20 30 40 50 60 70)
        punpckhqdq xmm3,xmm0            ; xmm3=col1=(01 11 21 31 41 51 61 71)
        movdqa     xmm4,xmm1            ; transpose coefficients(phase 3)
        punpcklqdq xmm1,xmm2            ; xmm1=col2=(02 12 22 32 42 52 62 72)
        punpckhqdq xmm4,xmm2            ; xmm4=col3=(03 13 23 33 43 53 63 73)

        movdqa  xmm0, XMMWORD [wk(6)]   ; xmm0=(04 14 24 34 05 15 25 35)
        movdqa  xmm2, XMMWORD [wk(7)]   ; xmm2=(06 16 26 36 07 17 27 37)

        movdqa  XMMWORD [wk(8)], xmm3   ; wk(8)=col1
        movdqa  XMMWORD [wk(9)], xmm4   ; wk(9)=col3

        movdqa     xmm3,xmm0            ; transpose coefficients(phase 3)
        punpcklqdq xmm0,xmm6            ; xmm0=col4=(04 14 24 34 44 54 64 74)
        punpckhqdq xmm3,xmm6            ; xmm3=col5=(05 15 25 35 45 55 65 75)
        movdqa     xmm4,xmm2            ; transpose coefficients(phase 3)
        punpcklqdq xmm2,xmm5            ; xmm2=col6=(06 16 26 36 46 56 66 76)
        punpckhqdq xmm4,xmm5            ; xmm4=col7=(07 17 27 37 47 57 67 77)

        movdqa  XMMWORD [wk(10)], xmm3  ; wk(10)=col5
        movdqa  XMMWORD [wk(11)], xmm4  ; wk(11)=col7
.column_end:

        ; -- Prefetch the next coefficient block

        prefetchnta [esi + DCTSIZE2*SIZEOF_JCOEF + 0*32]
        prefetchnta [esi + DCTSIZE2*SIZEOF_JCOEF + 1*32]
        prefetchnta [esi + DCTSIZE2*SIZEOF_JCOEF + 2*32]
        prefetchnta [esi + DCTSIZE2*SIZEOF_JCOEF + 3*32]
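        ;
        ; DCTSIZE2*SIZEOF_JCOEF = 64*2 = 128 bytes, so the four prefetchnta
        ; instructions touch the 128 bytes immediately following this
        ; coefficient block in 32-byte strides, on the assumption that the
        ; caller decodes blocks sequentially.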

        ; ---- Pass 2: process rows from work array, store into output array.

        mov     eax, [original_ebp]
        mov     edi, JSAMPARRAY [output_buf(eax)]       ; (JSAMPROW *)
        mov     eax, JDIMENSION [output_col(eax)]

        ; -- Even part

        ; xmm7=col0, xmm1=col2, xmm0=col4, xmm2=col6

        ; (Original)
        ; z1 = (z2 + z3) * 0.541196100;
        ; tmp2 = z1 + z3 * -1.847759065;
        ; tmp3 = z1 + z2 * 0.765366865;
        ;
        ; (This implementation)
        ; tmp2 = z2 * 0.541196100 + z3 * (0.541196100 - 1.847759065);
        ; tmp3 = z2 * (0.541196100 + 0.765366865) + z3 * 0.541196100;

        movdqa    xmm6,xmm1             ; xmm1=in2=z2
        movdqa    xmm5,xmm1
        punpcklwd xmm6,xmm2             ; xmm2=in6=z3
        punpckhwd xmm5,xmm2
        movdqa    xmm1,xmm6
        movdqa    xmm2,xmm5
        pmaddwd   xmm6,[GOTOFF(ebx,PW_F130_F054)]       ; xmm6=tmp3L
        pmaddwd   xmm5,[GOTOFF(ebx,PW_F130_F054)]       ; xmm5=tmp3H
        pmaddwd   xmm1,[GOTOFF(ebx,PW_F054_MF130)]      ; xmm1=tmp2L
        pmaddwd   xmm2,[GOTOFF(ebx,PW_F054_MF130)]      ; xmm2=tmp2H

        movdqa    xmm3,xmm7
        paddw     xmm7,xmm0             ; xmm7=in0+in4
        psubw     xmm3,xmm0             ; xmm3=in0-in4

        pxor      xmm4,xmm4
        pxor      xmm0,xmm0
        punpcklwd xmm4,xmm7             ; xmm4=tmp0L
        punpckhwd xmm0,xmm7             ; xmm0=tmp0H
        psrad     xmm4,(16-CONST_BITS)  ; psrad xmm4,16 & pslld xmm4,CONST_BITS
        psrad     xmm0,(16-CONST_BITS)  ; psrad xmm0,16 & pslld xmm0,CONST_BITS

        movdqa  xmm7,xmm4
        paddd   xmm4,xmm6               ; xmm4=tmp10L
        psubd   xmm7,xmm6               ; xmm7=tmp13L
        movdqa  xmm6,xmm0
        paddd   xmm0,xmm5               ; xmm0=tmp10H
        psubd   xmm6,xmm5               ; xmm6=tmp13H

        movdqa  XMMWORD [wk(0)], xmm4   ; wk(0)=tmp10L
        movdqa  XMMWORD [wk(1)], xmm0   ; wk(1)=tmp10H
        movdqa  XMMWORD [wk(2)], xmm7   ; wk(2)=tmp13L
        movdqa  XMMWORD [wk(3)], xmm6   ; wk(3)=tmp13H

        pxor      xmm5,xmm5
        pxor      xmm4,xmm4
        punpcklwd xmm5,xmm3             ; xmm5=tmp1L
        punpckhwd xmm4,xmm3             ; xmm4=tmp1H
        psrad     xmm5,(16-CONST_BITS)  ; psrad xmm5,16 & pslld xmm5,CONST_BITS
        psrad     xmm4,(16-CONST_BITS)  ; psrad xmm4,16 & pslld xmm4,CONST_BITS

        movdqa  xmm0,xmm5
        paddd   xmm5,xmm1               ; xmm5=tmp11L
        psubd   xmm0,xmm1               ; xmm0=tmp12L
        movdqa  xmm7,xmm4
        paddd   xmm4,xmm2               ; xmm4=tmp11H
        psubd   xmm7,xmm2               ; xmm7=tmp12H

        movdqa  XMMWORD [wk(4)], xmm5   ; wk(4)=tmp11L
        movdqa  XMMWORD [wk(5)], xmm4   ; wk(5)=tmp11H
        movdqa  XMMWORD [wk(6)], xmm0   ; wk(6)=tmp12L
        movdqa  XMMWORD [wk(7)], xmm7   ; wk(7)=tmp12H

        ; -- Odd part

        movdqa  xmm6, XMMWORD [wk(9)]   ; xmm6=col3
        movdqa  xmm3, XMMWORD [wk(8)]   ; xmm3=col1
        movdqa  xmm1, XMMWORD [wk(11)]  ; xmm1=col7
        movdqa  xmm2, XMMWORD [wk(10)]  ; xmm2=col5

        movdqa  xmm5,xmm6
        movdqa  xmm4,xmm3
        paddw   xmm5,xmm1               ; xmm5=z3
        paddw   xmm4,xmm2               ; xmm4=z4

        ; (Original)
        ; z5 = (z3 + z4) * 1.175875602;
        ; z3 = z3 * -1.961570560;  z4 = z4 * -0.390180644;
        ; z3 += z5;  z4 += z5;
        ;
        ; (This implementation)
        ; z3 = z3 * (1.175875602 - 1.961570560) + z4 * 1.175875602;
        ; z4 = z3 * 1.175875602 + z4 * (1.175875602 - 0.390180644);

        movdqa    xmm0,xmm5
        movdqa    xmm7,xmm5
        punpcklwd xmm0,xmm4
        punpckhwd xmm7,xmm4
        movdqa    xmm5,xmm0
        movdqa    xmm4,xmm7
        pmaddwd   xmm0,[GOTOFF(ebx,PW_MF078_F117)]      ; xmm0=z3L
        pmaddwd   xmm7,[GOTOFF(ebx,PW_MF078_F117)]      ; xmm7=z3H
        pmaddwd   xmm5,[GOTOFF(ebx,PW_F117_F078)]       ; xmm5=z4L
        pmaddwd   xmm4,[GOTOFF(ebx,PW_F117_F078)]       ; xmm4=z4H

        movdqa  XMMWORD [wk(10)], xmm0  ; wk(10)=z3L
        movdqa  XMMWORD [wk(11)], xmm7  ; wk(11)=z3H

        ; (Original)
        ; z1 = tmp0 + tmp3;  z2 = tmp1 + tmp2;
        ; tmp0 = tmp0 * 0.298631336;  tmp1 = tmp1 * 2.053119869;
        ; tmp2 = tmp2 * 3.072711026;  tmp3 = tmp3 * 1.501321110;
        ; z1 = z1 * -0.899976223;  z2 = z2 * -2.562915447;
        ; tmp0 += z1 + z3;  tmp1 += z2 + z4;
        ; tmp2 += z2 + z3;  tmp3 += z1 + z4;
        ;
        ; (This implementation)
        ; tmp0 = tmp0 * (0.298631336 - 0.899976223) + tmp3 * -0.899976223;
        ; tmp1 = tmp1 * (2.053119869 - 2.562915447) + tmp2 * -2.562915447;
        ; tmp2 = tmp1 * -2.562915447 + tmp2 * (3.072711026 - 2.562915447);
        ; tmp3 = tmp0 * -0.899976223 + tmp3 * (1.501321110 - 0.899976223);
        ; tmp0 += z3;  tmp1 += z4;
        ; tmp2 += z3;  tmp3 += z4;

        movdqa    xmm0,xmm1
        movdqa    xmm7,xmm1
        punpcklwd xmm0,xmm3
        punpckhwd xmm7,xmm3
        movdqa    xmm1,xmm0
        movdqa    xmm3,xmm7
        pmaddwd   xmm0,[GOTOFF(ebx,PW_MF060_MF089)]     ; xmm0=tmp0L
        pmaddwd   xmm7,[GOTOFF(ebx,PW_MF060_MF089)]     ; xmm7=tmp0H
        pmaddwd   xmm1,[GOTOFF(ebx,PW_MF089_F060)]      ; xmm1=tmp3L
        pmaddwd   xmm3,[GOTOFF(ebx,PW_MF089_F060)]      ; xmm3=tmp3H

        paddd   xmm0, XMMWORD [wk(10)]  ; xmm0=tmp0L
        paddd   xmm7, XMMWORD [wk(11)]  ; xmm7=tmp0H
        paddd   xmm1,xmm5               ; xmm1=tmp3L
        paddd   xmm3,xmm4               ; xmm3=tmp3H

        movdqa  XMMWORD [wk(8)], xmm0   ; wk(8)=tmp0L
        movdqa  XMMWORD [wk(9)], xmm7   ; wk(9)=tmp0H

        movdqa    xmm0,xmm2
        movdqa    xmm7,xmm2
        punpcklwd xmm0,xmm6
        punpckhwd xmm7,xmm6
        movdqa    xmm2,xmm0
        movdqa    xmm6,xmm7
        pmaddwd   xmm0,[GOTOFF(ebx,PW_MF050_MF256)]     ; xmm0=tmp1L
        pmaddwd   xmm7,[GOTOFF(ebx,PW_MF050_MF256)]     ; xmm7=tmp1H
        pmaddwd   xmm2,[GOTOFF(ebx,PW_MF256_F050)]      ; xmm2=tmp2L
        pmaddwd   xmm6,[GOTOFF(ebx,PW_MF256_F050)]      ; xmm6=tmp2H

        paddd   xmm0,xmm5               ; xmm0=tmp1L
        paddd   xmm7,xmm4               ; xmm7=tmp1H
        paddd   xmm2, XMMWORD [wk(10)]  ; xmm2=tmp2L
        paddd   xmm6, XMMWORD [wk(11)]  ; xmm6=tmp2H

        movdqa  XMMWORD [wk(10)], xmm0  ; wk(10)=tmp1L
        movdqa  XMMWORD [wk(11)], xmm7  ; wk(11)=tmp1H

        ; -- Final output stage

        movdqa  xmm5, XMMWORD [wk(0)]   ; xmm5=tmp10L
        movdqa  xmm4, XMMWORD [wk(1)]   ; xmm4=tmp10H

        movdqa  xmm0,xmm5
        movdqa  xmm7,xmm4
        paddd   xmm5,xmm1               ; xmm5=data0L
        paddd   xmm4,xmm3               ; xmm4=data0H
        psubd   xmm0,xmm1               ; xmm0=data7L
        psubd   xmm7,xmm3               ; xmm7=data7H

        movdqa  xmm1,[GOTOFF(ebx,PD_DESCALE_P2)]        ; xmm1=[PD_DESCALE_P2]

        paddd   xmm5,xmm1
        paddd   xmm4,xmm1
        psrad   xmm5,DESCALE_P2
        psrad   xmm4,DESCALE_P2
        paddd   xmm0,xmm1
        paddd   xmm7,xmm1
        psrad   xmm0,DESCALE_P2
        psrad   xmm7,DESCALE_P2

        packssdw  xmm5,xmm4             ; xmm5=data0=(00 10 20 30 40 50 60 70)
        packssdw  xmm0,xmm7             ; xmm0=data7=(07 17 27 37 47 57 67 77)

        movdqa  xmm3, XMMWORD [wk(4)]   ; xmm3=tmp11L
        movdqa  xmm1, XMMWORD [wk(5)]   ; xmm1=tmp11H

        movdqa  xmm4,xmm3
        movdqa  xmm7,xmm1
        paddd   xmm3,xmm2               ; xmm3=data1L
        paddd   xmm1,xmm6               ; xmm1=data1H
        psubd   xmm4,xmm2               ; xmm4=data6L
        psubd   xmm7,xmm6               ; xmm7=data6H

        movdqa  xmm2,[GOTOFF(ebx,PD_DESCALE_P2)]        ; xmm2=[PD_DESCALE_P2]

        paddd   xmm3,xmm2
        paddd   xmm1,xmm2
        psrad   xmm3,DESCALE_P2
        psrad   xmm1,DESCALE_P2
        paddd   xmm4,xmm2
        paddd   xmm7,xmm2
        psrad   xmm4,DESCALE_P2
        psrad   xmm7,DESCALE_P2

        packssdw  xmm3,xmm1             ; xmm3=data1=(01 11 21 31 41 51 61 71)
        packssdw  xmm4,xmm7             ; xmm4=data6=(06 16 26 36 46 56 66 76)

        packsswb  xmm5,xmm4             ; xmm5=(00 10 20 30 40 50 60 70 06 16 26 36 46 56 66 76)
        packsswb  xmm3,xmm0             ; xmm3=(01 11 21 31 41 51 61 71 07 17 27 37 47 57 67 77)
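        ;
        ; Pairing data0 with data6 and data1 with data7 here (and data2/data4,
        ; data3/data5 below) is deliberate: after the byte-wise transpose that
        ; follows, the rows come out in an order that the final pshufd swaps
        ; straighten back into rows 0..7 for the stores.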

        movdqa  xmm6, XMMWORD [wk(6)]   ; xmm6=tmp12L
        movdqa  xmm2, XMMWORD [wk(7)]   ; xmm2=tmp12H
        movdqa  xmm1, XMMWORD [wk(10)]  ; xmm1=tmp1L
        movdqa  xmm7, XMMWORD [wk(11)]  ; xmm7=tmp1H

        movdqa  XMMWORD [wk(0)], xmm5   ; wk(0)=(00 10 20 30 40 50 60 70 06 16 26 36 46 56 66 76)
        movdqa  XMMWORD [wk(1)], xmm3   ; wk(1)=(01 11 21 31 41 51 61 71 07 17 27 37 47 57 67 77)

        movdqa  xmm4,xmm6
        movdqa  xmm0,xmm2
        paddd   xmm6,xmm1               ; xmm6=data2L
        paddd   xmm2,xmm7               ; xmm2=data2H
        psubd   xmm4,xmm1               ; xmm4=data5L
        psubd   xmm0,xmm7               ; xmm0=data5H

        movdqa  xmm5,[GOTOFF(ebx,PD_DESCALE_P2)]        ; xmm5=[PD_DESCALE_P2]

        paddd   xmm6,xmm5
        paddd   xmm2,xmm5
        psrad   xmm6,DESCALE_P2
        psrad   xmm2,DESCALE_P2
        paddd   xmm4,xmm5
        paddd   xmm0,xmm5
        psrad   xmm4,DESCALE_P2
        psrad   xmm0,DESCALE_P2

        packssdw  xmm6,xmm2             ; xmm6=data2=(02 12 22 32 42 52 62 72)
        packssdw  xmm4,xmm0             ; xmm4=data5=(05 15 25 35 45 55 65 75)

        movdqa  xmm3, XMMWORD [wk(2)]   ; xmm3=tmp13L
        movdqa  xmm1, XMMWORD [wk(3)]   ; xmm1=tmp13H
        movdqa  xmm7, XMMWORD [wk(8)]   ; xmm7=tmp0L
        movdqa  xmm5, XMMWORD [wk(9)]   ; xmm5=tmp0H

        movdqa  xmm2,xmm3
        movdqa  xmm0,xmm1
        paddd   xmm3,xmm7               ; xmm3=data3L
        paddd   xmm1,xmm5               ; xmm1=data3H
        psubd   xmm2,xmm7               ; xmm2=data4L
        psubd   xmm0,xmm5               ; xmm0=data4H

        movdqa  xmm7,[GOTOFF(ebx,PD_DESCALE_P2)]        ; xmm7=[PD_DESCALE_P2]

        paddd   xmm3,xmm7
        paddd   xmm1,xmm7
        psrad   xmm3,DESCALE_P2
        psrad   xmm1,DESCALE_P2
        paddd   xmm2,xmm7
        paddd   xmm0,xmm7
        psrad   xmm2,DESCALE_P2
        psrad   xmm0,DESCALE_P2

        movdqa    xmm5,[GOTOFF(ebx,PB_CENTERJSAMP)]     ; xmm5=[PB_CENTERJSAMP]

        packssdw  xmm3,xmm1             ; xmm3=data3=(03 13 23 33 43 53 63 73)
        packssdw  xmm2,xmm0             ; xmm2=data4=(04 14 24 34 44 54 64 74)

        movdqa    xmm7, XMMWORD [wk(0)] ; xmm7=(00 10 20 30 40 50 60 70 06 16 26 36 46 56 66 76)
        movdqa    xmm1, XMMWORD [wk(1)] ; xmm1=(01 11 21 31 41 51 61 71 07 17 27 37 47 57 67 77)

        packsswb  xmm6,xmm2             ; xmm6=(02 12 22 32 42 52 62 72 04 14 24 34 44 54 64 74)
        packsswb  xmm3,xmm4             ; xmm3=(03 13 23 33 43 53 63 73 05 15 25 35 45 55 65 75)

        paddb     xmm7,xmm5
        paddb     xmm1,xmm5
        paddb     xmm6,xmm5
        paddb     xmm3,xmm5
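        ;
        ; packsswb saturates the descaled results to the signed byte range
        ; [-128, 127]; adding CENTERJSAMPLE (128 for 8-bit samples) with
        ; paddb then wraps them into unsigned samples in [0, 255], playing
        ; the role of the range_limit[] clamping in jidctint.c.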

        movdqa    xmm0,xmm7     ; transpose coefficients(phase 1)
        punpcklbw xmm7,xmm1     ; xmm7=(00 01 10 11 20 21 30 31 40 41 50 51 60 61 70 71)
        punpckhbw xmm0,xmm1     ; xmm0=(06 07 16 17 26 27 36 37 46 47 56 57 66 67 76 77)
        movdqa    xmm2,xmm6     ; transpose coefficients(phase 1)
        punpcklbw xmm6,xmm3     ; xmm6=(02 03 12 13 22 23 32 33 42 43 52 53 62 63 72 73)
        punpckhbw xmm2,xmm3     ; xmm2=(04 05 14 15 24 25 34 35 44 45 54 55 64 65 74 75)

        movdqa    xmm4,xmm7     ; transpose coefficients(phase 2)
        punpcklwd xmm7,xmm6     ; xmm7=(00 01 02 03 10 11 12 13 20 21 22 23 30 31 32 33)
        punpckhwd xmm4,xmm6     ; xmm4=(40 41 42 43 50 51 52 53 60 61 62 63 70 71 72 73)
        movdqa    xmm5,xmm2     ; transpose coefficients(phase 2)
        punpcklwd xmm2,xmm0     ; xmm2=(04 05 06 07 14 15 16 17 24 25 26 27 34 35 36 37)
        punpckhwd xmm5,xmm0     ; xmm5=(44 45 46 47 54 55 56 57 64 65 66 67 74 75 76 77)

        movdqa    xmm1,xmm7     ; transpose coefficients(phase 3)
        punpckldq xmm7,xmm2     ; xmm7=(00 01 02 03 04 05 06 07 10 11 12 13 14 15 16 17)
        punpckhdq xmm1,xmm2     ; xmm1=(20 21 22 23 24 25 26 27 30 31 32 33 34 35 36 37)
        movdqa    xmm3,xmm4     ; transpose coefficients(phase 3)
        punpckldq xmm4,xmm5     ; xmm4=(40 41 42 43 44 45 46 47 50 51 52 53 54 55 56 57)
        punpckhdq xmm3,xmm5     ; xmm3=(60 61 62 63 64 65 66 67 70 71 72 73 74 75 76 77)

        pshufd  xmm6,xmm7,0x4E  ; xmm6=(10 11 12 13 14 15 16 17 00 01 02 03 04 05 06 07)
        pshufd  xmm0,xmm1,0x4E  ; xmm0=(30 31 32 33 34 35 36 37 20 21 22 23 24 25 26 27)
        pshufd  xmm2,xmm4,0x4E  ; xmm2=(50 51 52 53 54 55 56 57 40 41 42 43 44 45 46 47)
        pshufd  xmm5,xmm3,0x4E  ; xmm5=(70 71 72 73 74 75 76 77 60 61 62 63 64 65 66 67)
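        ;
        ; The shuffle order 0x4E = 01001110b selects dwords (2,3,0,1), i.e.
        ; it swaps the two 64-bit halves, so the odd-numbered rows end up
        ; in the low quadwords ready for the movq stores below.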

        mov     edx, JSAMPROW [edi+0*SIZEOF_JSAMPROW]
        mov     esi, JSAMPROW [edi+2*SIZEOF_JSAMPROW]
        movq    XMM_MMWORD [edx+eax*SIZEOF_JSAMPLE], xmm7
        movq    XMM_MMWORD [esi+eax*SIZEOF_JSAMPLE], xmm1
        mov     edx, JSAMPROW [edi+4*SIZEOF_JSAMPROW]
        mov     esi, JSAMPROW [edi+6*SIZEOF_JSAMPROW]
        movq    XMM_MMWORD [edx+eax*SIZEOF_JSAMPLE], xmm4
        movq    XMM_MMWORD [esi+eax*SIZEOF_JSAMPLE], xmm3

        mov     edx, JSAMPROW [edi+1*SIZEOF_JSAMPROW]
        mov     esi, JSAMPROW [edi+3*SIZEOF_JSAMPROW]
        movq    XMM_MMWORD [edx+eax*SIZEOF_JSAMPLE], xmm6
        movq    XMM_MMWORD [esi+eax*SIZEOF_JSAMPLE], xmm0
        mov     edx, JSAMPROW [edi+5*SIZEOF_JSAMPROW]
        mov     esi, JSAMPROW [edi+7*SIZEOF_JSAMPROW]
        movq    XMM_MMWORD [edx+eax*SIZEOF_JSAMPLE], xmm2
        movq    XMM_MMWORD [esi+eax*SIZEOF_JSAMPLE], xmm5
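        ;
        ; Each movq writes the low 8 bytes of a register, i.e. one complete
        ; 8-sample output row, to output_buf[row] + output_col (edi holds
        ; the JSAMPROW array, eax the column offset).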

        pop     edi
        pop     esi
;       pop     edx             ; need not be preserved
;       pop     ecx             ; unused
        poppic  ebx
        mov     esp,ebp         ; esp <- aligned ebp
        pop     esp             ; esp <- original ebp
        pop     ebp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16