;
; jquantf.asm - sample data conversion and quantization (64-bit SSE & SSE2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2009, 2016, D. R. Commander.
; Copyright (C) 2018, Matthias Räncker.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------
    SECTION     SEG_TEXT
    BITS        64
;
; Load data into workspace, applying unsigned->signed conversion
;
; GLOBAL(void)
; jsimd_convsamp_float_sse2(JSAMPARRAY sample_data, JDIMENSION start_col,
;                           FAST_FLOAT *workspace);
;

; r10 = JSAMPARRAY sample_data
; r11d = JDIMENSION start_col
; r12 = FAST_FLOAT *workspace
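;
; A rough scalar C equivalent, for reference only (a sketch, not part of the
; build; GETJSAMPLE, CENTERJSAMPLE, DCTSIZE, and FAST_FLOAT come from the
; libjpeg headers):
;
;   for (row = 0; row < DCTSIZE; row++) {
;     JSAMPROW elem = sample_data[row] + start_col;
;     for (col = 0; col < DCTSIZE; col++)
;       *workspace++ = (FAST_FLOAT)(GETJSAMPLE(elem[col]) - CENTERJSAMPLE);
;   }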

    align       32
    GLOBAL_FUNCTION(jsimd_convsamp_float_sse2)

EXTN(jsimd_convsamp_float_sse2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 3
    push        rbx

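; Build PB_CENTERJSAMPLE in xmm7 without a memory constant: pcmpeqw sets
; every word to 0xFFFF, psllw turns each word into 0xFF80 (-128), and
; packsswb saturates those words to bytes, leaving 0x80 in every byte.
; The psubb below then level-shifts each unsigned sample (0..255) to a
; signed value centered on zero.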
    pcmpeqw     xmm7, xmm7
    psllw       xmm7, 7
    packsswb    xmm7, xmm7              ; xmm7 = PB_CENTERJSAMPLE (0x808080..)

    mov         rsi, r10
    mov         eax, r11d
    mov         rdi, r12
    mov         rcx, DCTSIZE/2
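; Each iteration converts two 8-sample rows, so DCTSIZE/2 = 4 iterations
; cover the full 8x8 block.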
.convloop:
    mov         rbxp, JSAMPROW [rsi+0*SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    mov         rdxp, JSAMPROW [rsi+1*SIZEOF_JSAMPROW]  ; (JSAMPLE *)

    movq        xmm0, XMM_MMWORD [rbx+rax*SIZEOF_JSAMPLE]
    movq        xmm1, XMM_MMWORD [rdx+rax*SIZEOF_JSAMPLE]

    psubb       xmm0, xmm7              ; xmm0=(01234567)
    psubb       xmm1, xmm7              ; xmm1=(89ABCDEF)

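; Widen the signed bytes to dwords: each byte is duplicated into a word,
; that word is interleaved into the high half of a dword, and the psrad by
; DWORD_BIT-BYTE_BIT (24) below shifts it down with sign extension.  xmm2
; and xmm3 may hold anything on entry to punpcklwd; their stale bits land
; in the low half of each dword and are shifted out.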
    punpcklbw   xmm0, xmm0              ; xmm0=(*0*1*2*3*4*5*6*7)
    punpcklbw   xmm1, xmm1              ; xmm1=(*8*9*A*B*C*D*E*F)

    punpcklwd   xmm2, xmm0              ; xmm2=(***0***1***2***3)
    punpckhwd   xmm0, xmm0              ; xmm0=(***4***5***6***7)
    punpcklwd   xmm3, xmm1              ; xmm3=(***8***9***A***B)
    punpckhwd   xmm1, xmm1              ; xmm1=(***C***D***E***F)

    psrad       xmm2, (DWORD_BIT-BYTE_BIT)  ; xmm2=(0123)
    psrad       xmm0, (DWORD_BIT-BYTE_BIT)  ; xmm0=(4567)
    cvtdq2ps    xmm2, xmm2                  ; xmm2=(0123)
    cvtdq2ps    xmm0, xmm0                  ; xmm0=(4567)
    psrad       xmm3, (DWORD_BIT-BYTE_BIT)  ; xmm3=(89AB)
    psrad       xmm1, (DWORD_BIT-BYTE_BIT)  ; xmm1=(CDEF)
    cvtdq2ps    xmm3, xmm3                  ; xmm3=(89AB)
    cvtdq2ps    xmm1, xmm1                  ; xmm1=(CDEF)

    movaps      XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_FAST_FLOAT)], xmm2
    movaps      XMMWORD [XMMBLOCK(0,1,rdi,SIZEOF_FAST_FLOAT)], xmm0
    movaps      XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_FAST_FLOAT)], xmm3
    movaps      XMMWORD [XMMBLOCK(1,1,rdi,SIZEOF_FAST_FLOAT)], xmm1

    add         rsi, byte 2*SIZEOF_JSAMPROW
    add         rdi, byte 2*DCTSIZE*SIZEOF_FAST_FLOAT
    dec         rcx
    jnz         short .convloop

    pop         rbx
    uncollect_args 3
    pop         rbp
    ret

; --------------------------------------------------------------------------
;
; Quantize/descale the coefficients, and store into coef_block
;
; GLOBAL(void)
; jsimd_quantize_float_sse2(JCOEFPTR coef_block, FAST_FLOAT *divisors,
;                           FAST_FLOAT *workspace);
;

; r10 = JCOEFPTR coef_block
; r11 = FAST_FLOAT *divisors
; r12 = FAST_FLOAT *workspace
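;
; A rough scalar C equivalent, for reference only (a sketch assuming the
; default round-to-nearest MXCSR mode used by cvtps2dq; divisors normally
; holds precomputed reciprocals of the scaled quantization steps, so the
; multiply performs the division):
;
;   for (i = 0; i < DCTSIZE2; i++)
;     coef_block[i] = (JCOEF)lrintf(workspace[i] * divisors[i]);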

    align       32
    GLOBAL_FUNCTION(jsimd_quantize_float_sse2)

EXTN(jsimd_quantize_float_sse2):
    push        rbp
    mov         rax, rsp
    mov         rbp, rsp
    collect_args 3

    mov         rsi, r12
    mov         rdx, r11
    mov         rdi, r10
    mov         rax, DCTSIZE2/16
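; Each iteration quantizes 16 coefficients, so DCTSIZE2/16 = 4 iterations
; cover the whole block.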
.quantloop:
    movaps      xmm0, XMMWORD [XMMBLOCK(0,0,rsi,SIZEOF_FAST_FLOAT)]
    movaps      xmm1, XMMWORD [XMMBLOCK(0,1,rsi,SIZEOF_FAST_FLOAT)]
    mulps       xmm0, XMMWORD [XMMBLOCK(0,0,rdx,SIZEOF_FAST_FLOAT)]
    mulps       xmm1, XMMWORD [XMMBLOCK(0,1,rdx,SIZEOF_FAST_FLOAT)]
    movaps      xmm2, XMMWORD [XMMBLOCK(1,0,rsi,SIZEOF_FAST_FLOAT)]
    movaps      xmm3, XMMWORD [XMMBLOCK(1,1,rsi,SIZEOF_FAST_FLOAT)]
    mulps       xmm2, XMMWORD [XMMBLOCK(1,0,rdx,SIZEOF_FAST_FLOAT)]
    mulps       xmm3, XMMWORD [XMMBLOCK(1,1,rdx,SIZEOF_FAST_FLOAT)]

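; cvtps2dq rounds to the nearest integer according to MXCSR (round-to-
; nearest-even by default), and packssdw packs the dwords to words with
; signed saturation, which covers the 16-bit JCOEF range.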
    cvtps2dq    xmm0, xmm0
    cvtps2dq    xmm1, xmm1
    cvtps2dq    xmm2, xmm2
    cvtps2dq    xmm3, xmm3

    packssdw    xmm0, xmm1
    packssdw    xmm2, xmm3

    movdqa      XMMWORD [XMMBLOCK(0,0,rdi,SIZEOF_JCOEF)], xmm0
    movdqa      XMMWORD [XMMBLOCK(1,0,rdi,SIZEOF_JCOEF)], xmm2

    add         rsi, byte 16*SIZEOF_FAST_FLOAT
    add         rdx, byte 16*SIZEOF_FAST_FLOAT
    add         rdi, byte 16*SIZEOF_JCOEF
    dec         rax
    jnz         short .quantloop

    uncollect_args 3
    pop         rbp
    ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32