#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
#
# AES-NI-CTR+GHASH stitch.
#
# February 2013
#
# The OpenSSL GCM implementation is organized in such a way that its
# performance is rather close to the sum of its streamed components:
# in this context, parallelized AES-NI CTR and modulo-scheduled
# PCLMULQDQ-enabled GHASH. Unfortunately, as no stitched implementation
# was observed to perform significantly better than the sum of the
# components on contemporary CPUs, the effort was deemed impossible to
# justify. This module is based on a combination of Intel submissions,
# [1] and [2], with a MOVBE twist suggested by Ilya Albrekht and Max
# Locktyukhin of Intel Corp., who verified that it reduces shuffle
# pressure with notable relative improvement, achieving 1.0 cycle per
# byte processed with a 128-bit key on Haswell processors, and 0.74
# on Broadwell. [Mentioned results are raw profiled measurements for
# favourable packet size, one divisible by 96. Applications using the
# EVP interface will observe a few percent worse performance.]
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
# [2] http://www.intel.com/content/dam/www/public/us/en/documents/software-support/enabling-high-performance-gcm.pdf

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
#
# TODO(davidben): Enable this after testing. $avx goes up to 2.
$avx = 0;

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

if ($avx>1) {{{

($inp,$out,$len,$key,$ivp,$Xip)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");

($Ii,$T1,$T2,$Hkey,
 $Z0,$Z1,$Z2,$Z3,$Xi) = map("%xmm$_",(0..8));

($inout0,$inout1,$inout2,$inout3,$inout4,$inout5,$rndkey) = map("%xmm$_",(9..15));

($counter,$rounds,$ret,$const,$in0,$end0)=("%ebx","%ebp","%r10","%r11","%r14","%r15");

$code=<<___;
.text

.type	_aesni_ctr32_ghash_6x,\@abi-omnipotent
.align	32
_aesni_ctr32_ghash_6x:
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	sub	\$6,$len
	vpxor	$Z0,$Z0,$Z0		# $Z0 = 0
	vmovdqu	0x00-0x80($key),$rndkey
	vpaddb	$T2,$T1,$inout1
	vpaddb	$T2,$inout1,$inout2
	vpaddb	$T2,$inout2,$inout3
	vpaddb	$T2,$inout3,$inout4
	vpaddb	$T2,$inout4,$inout5
	vpxor	$rndkey,$T1,$inout0
	vmovdqu	$Z0,16+8(%rsp)		# "$Z3" = 0
	jmp	.Loop6x

.align	32
.Loop6x:
	add	\$`6<<24`,$counter
	jc	.Lhandle_ctr32		# discard $inout[1-5]?
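	# Note: $counter caches the counter block's last dword, so the
	# add above bumps what is the least significant byte of the
	# big-endian 32-bit counter.  A carry means one of the six
	# vpaddb-generated counters would need a byte carry that vpaddb
	# cannot propagate, so .Lhandle_ctr32 recomputes them with
	# proper 32-bit additions instead.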
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddb	$T2,$inout5,$T1		# next counter value
	vpxor	$rndkey,$inout1,$inout1
	vpxor	$rndkey,$inout2,$inout2

.Lresume_ctr32:
	vmovdqu	$T1,($ivp)		# save next counter value
	vpclmulqdq	\$0x10,$Hkey,$Z3,$Z1
	vpxor	$rndkey,$inout3,$inout3
	vmovups	0x10-0x80($key),$T2	# borrow $T2 for $rndkey
	vpclmulqdq	\$0x01,$Hkey,$Z3,$Z2
	xor	%r12,%r12
	cmp	$in0,$end0

	vaesenc	$T2,$inout0,$inout0
	vmovdqu	0x30+8(%rsp),$Ii	# I[4]
	vpxor	$rndkey,$inout4,$inout4
	vpclmulqdq	\$0x00,$Hkey,$Z3,$T1
	vaesenc	$T2,$inout1,$inout1
	vpxor	$rndkey,$inout5,$inout5
	setnc	%r12b
	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
	vaesenc	$T2,$inout2,$inout2
	vmovdqu	0x10-0x20($Xip),$Hkey	# $Hkey^2
	neg	%r12
	vaesenc	$T2,$inout3,$inout3
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Z1
	vpxor	$Z0,$Xi,$Xi		# modulo-scheduled
	vaesenc	$T2,$inout4,$inout4
	vpxor	$Z1,$T1,$Z0
	and	\$0x60,%r12
	vmovups	0x20-0x80($key),$rndkey
	vpclmulqdq	\$0x10,$Hkey,$Ii,$T1
	vaesenc	$T2,$inout5,$inout5

	vpclmulqdq	\$0x01,$Hkey,$Ii,$T2
	lea	($in0,%r12),$in0
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	16+8(%rsp),$Xi,$Xi	# modulo-scheduled [vpxor $Z3,$Xi,$Xi]
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Hkey
	vmovdqu	0x40+8(%rsp),$Ii	# I[3]
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x58($in0),%r13
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x50($in0),%r12
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x20+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x28+8(%rsp)
	vmovdqu	0x30-0x20($Xip),$Z1	# borrow $Z1 for $Hkey^3
	vaesenc	$rndkey,$inout5,$inout5

	vmovups	0x30-0x80($key),$rndkey
	vpxor	$T1,$Z2,$Z2
	vpclmulqdq	\$0x00,$Z1,$Ii,$T1
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$T2,$Z2,$Z2
	vpclmulqdq	\$0x10,$Z1,$Ii,$T2
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	$Hkey,$Z3,$Z3
	vpclmulqdq	\$0x01,$Z1,$Ii,$Hkey
	vaesenc	$rndkey,$inout2,$inout2
	vpclmulqdq	\$0x11,$Z1,$Ii,$Z1
	vmovdqu	0x50+8(%rsp),$Ii	# I[2]
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	$T1,$Z0,$Z0
	vmovdqu	0x40-0x20($Xip),$T1	# borrow $T1 for $Hkey^4
	vaesenc	$rndkey,$inout5,$inout5

	vmovups	0x40-0x80($key),$rndkey
	vpxor	$T2,$Z2,$Z2
	vpclmulqdq	\$0x00,$T1,$Ii,$T2
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x10,$T1,$Ii,$Hkey
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x48($in0),%r13
	vpxor	$Z1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T1,$Ii,$Z1
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x40($in0),%r12
	vpclmulqdq	\$0x11,$T1,$Ii,$T1
	vmovdqu	0x60+8(%rsp),$Ii	# I[1]
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x30+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x38+8(%rsp)
	vpxor	$T2,$Z0,$Z0
	vmovdqu	0x60-0x20($Xip),$T2	# borrow $T2 for $Hkey^5
	vaesenc	$rndkey,$inout5,$inout5

	vmovups	0x50-0x80($key),$rndkey
	vpxor	$Hkey,$Z2,$Z2
	vpclmulqdq	\$0x00,$T2,$Ii,$Hkey
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$T2,$Ii,$Z1
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x38($in0),%r13
	vpxor	$T1,$Z3,$Z3
	vpclmulqdq	\$0x01,$T2,$Ii,$T1
	vpxor	0x70+8(%rsp),$Xi,$Xi	# accumulate I[0]
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x30($in0),%r12
	vpclmulqdq	\$0x11,$T2,$Ii,$T2
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x40+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x48+8(%rsp)
	vpxor	$Hkey,$Z0,$Z0
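	# $Xi now carries the running hash xor'ed with input block I[0]
	# (accumulated above); below it meets the highest power $Hkey^6,
	# while the movbe/mov pairs keep staging byte-swapped input words
	# for the next iteration's GHASH batch on the stack.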
	vmovdqu	0x70-0x20($Xip),$Hkey	# $Hkey^6
	vaesenc	$rndkey,$inout5,$inout5

	vmovups	0x60-0x80($key),$rndkey
	vpxor	$Z1,$Z2,$Z2
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Z1
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$T1,$Z2,$Z2
	vpclmulqdq	\$0x01,$Hkey,$Xi,$T1
	vaesenc	$rndkey,$inout1,$inout1
	movbe	0x28($in0),%r13
	vpxor	$T2,$Z3,$Z3
	vpclmulqdq	\$0x00,$Hkey,$Xi,$T2
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x20($in0),%r12
	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xi
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r13,0x50+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	mov	%r12,0x58+8(%rsp)
	vpxor	$Z1,$Z2,$Z2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	$T1,$Z2,$Z2

	vmovups	0x70-0x80($key),$rndkey
	vpslldq	\$8,$Z2,$Z1
	vpxor	$T2,$Z0,$Z0
	vmovdqu	0x10($const),$Hkey	# .Lpoly

	vaesenc	$rndkey,$inout0,$inout0
	vpxor	$Xi,$Z3,$Z3
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	$Z1,$Z0,$Z0
	movbe	0x18($in0),%r13
	vaesenc	$rndkey,$inout2,$inout2
	movbe	0x10($in0),%r12
	vpalignr	\$8,$Z0,$Z0,$Ii	# 1st phase
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	mov	%r13,0x60+8(%rsp)
	vaesenc	$rndkey,$inout3,$inout3
	mov	%r12,0x68+8(%rsp)
	vaesenc	$rndkey,$inout4,$inout4
	vmovups	0x80-0x80($key),$T1	# borrow $T1 for $rndkey
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vmovups	0x90-0x80($key),$rndkey
	vaesenc	$T1,$inout1,$inout1
	vpsrldq	\$8,$Z2,$Z2
	vaesenc	$T1,$inout2,$inout2
	vpxor	$Z2,$Z3,$Z3
	vaesenc	$T1,$inout3,$inout3
	vpxor	$Ii,$Z0,$Z0
	movbe	0x08($in0),%r13
	vaesenc	$T1,$inout4,$inout4
	movbe	0x00($in0),%r12
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xa0-0x80($key),$T1
	cmp	\$11,$rounds
	jb	.Lenc_tail		# 128-bit key

	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vaesenc	$T1,$inout1,$inout1
	vaesenc	$T1,$inout2,$inout2
	vaesenc	$T1,$inout3,$inout3
	vaesenc	$T1,$inout4,$inout4
	vmovups	0xb0-0x80($key),$rndkey
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xc0-0x80($key),$T1
	je	.Lenc_tail		# 192-bit key

	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5

	vaesenc	$T1,$inout0,$inout0
	vaesenc	$T1,$inout1,$inout1
	vaesenc	$T1,$inout2,$inout2
	vaesenc	$T1,$inout3,$inout3
	vaesenc	$T1,$inout4,$inout4
	vmovups	0xd0-0x80($key),$rndkey
	vaesenc	$T1,$inout5,$inout5
	vmovups	0xe0-0x80($key),$T1
	jmp	.Lenc_tail		# 256-bit key

.align	32
.Lhandle_ctr32:
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	vpshufb	$Ii,$T1,$Z2		# byte-swap counter
	vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd	$Z1,$Z2,$inout2
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpaddd	$Z1,$inout1,$inout3
	vpshufb	$Ii,$inout1,$inout1
	vpaddd	$Z1,$inout2,$inout4
	vpshufb	$Ii,$inout2,$inout2
	vpxor	$rndkey,$inout1,$inout1
	vpaddd	$Z1,$inout3,$inout5
	vpshufb	$Ii,$inout3,$inout3
	vpxor	$rndkey,$inout2,$inout2
	vpaddd	$Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb	$Ii,$inout4,$inout4
	vpshufb	$Ii,$inout5,$inout5
	vpshufb	$Ii,$T1,$T1		# next counter value
	jmp	.Lresume_ctr32
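# Tail of the stitched loop: the last AES rounds.  Each input block is
# xor'ed into the last round key up front, so every vaesenclast below
# performs the final AddRoundKey and the CTR keystream xor in a single
# instruction.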
.align	32
.Lenc_tail:
	vaesenc	$rndkey,$inout0,$inout0
	vmovdqu	$Z3,16+8(%rsp)		# postpone vpxor $Z3,$Xi,$Xi
	vpalignr	\$8,$Z0,$Z0,$Xi	# 2nd phase
	vaesenc	$rndkey,$inout1,$inout1
	vpclmulqdq	\$0x10,$Hkey,$Z0,$Z0
	vpxor	0x00($inp),$T1,$T2
	vaesenc	$rndkey,$inout2,$inout2
	vpxor	0x10($inp),$T1,$Ii
	vaesenc	$rndkey,$inout3,$inout3
	vpxor	0x20($inp),$T1,$Z1
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	0x30($inp),$T1,$Z2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	0x40($inp),$T1,$Z3
	vpxor	0x50($inp),$T1,$Hkey
	vmovdqu	($ivp),$T1		# load next counter value

	vaesenclast	$T2,$inout0,$inout0
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	vaesenclast	$Ii,$inout1,$inout1
	vpaddb	$T2,$T1,$Ii
	mov	%r13,0x70+8(%rsp)
	lea	0x60($inp),$inp
	vaesenclast	$Z1,$inout2,$inout2
	vpaddb	$T2,$Ii,$Z1
	mov	%r12,0x78+8(%rsp)
	lea	0x60($out),$out
	vmovdqu	0x00-0x80($key),$rndkey
	vaesenclast	$Z2,$inout3,$inout3
	vpaddb	$T2,$Z1,$Z2
	vaesenclast	$Z3,$inout4,$inout4
	vpaddb	$T2,$Z2,$Z3
	vaesenclast	$Hkey,$inout5,$inout5
	vpaddb	$T2,$Z3,$Hkey

	add	\$0x60,$ret
	sub	\$0x6,$len
	jc	.L6x_done

	vmovups	$inout0,-0x60($out)	# save output
	vpxor	$rndkey,$T1,$inout0
	vmovups	$inout1,-0x50($out)
	vmovdqa	$Ii,$inout1		# 0 latency
	vmovups	$inout2,-0x40($out)
	vmovdqa	$Z1,$inout2		# 0 latency
	vmovups	$inout3,-0x30($out)
	vmovdqa	$Z2,$inout3		# 0 latency
	vmovups	$inout4,-0x20($out)
	vmovdqa	$Z3,$inout4		# 0 latency
	vmovups	$inout5,-0x10($out)
	vmovdqa	$Hkey,$inout5		# 0 latency
	vmovdqu	0x20+8(%rsp),$Z3	# I[5]
	jmp	.Loop6x

.L6x_done:
	vpxor	16+8(%rsp),$Xi,$Xi	# modulo-scheduled
	vpxor	$Z0,$Xi,$Xi		# modulo-scheduled

	ret
.size	_aesni_ctr32_ghash_6x,.-_aesni_ctr32_ghash_6x
___
######################################################################
#
# size_t aesni_gcm_[en|de]crypt(const void *inp, void *out, size_t len,
#		const AES_KEY *key, unsigned char iv[16],
#		struct { u128 Xi,H,Htbl[9]; } *Xip);
$code.=<<___;
.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@function,6
.align	32
aesni_gcm_decrypt:
	xor	$ret,$ret
	cmp	\$0x60,$len		# minimal accepted length
	jb	.Lgcm_dec_abort

	lea	(%rsp),%rax		# save stack pointer
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
.Lgcm_dec_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu	($ivp),$T1		# input counter value
	add	\$-128,%rsp
	mov	12($ivp),$counter
	lea	.Lbswap_mask(%rip),$const
	lea	-0x80($key),$in0	# borrow $in0
	mov	\$0xf80,$end0		# borrow $end0
	vmovdqu	($Xip),$Xi		# load Xi
	and	\$-128,%rsp		# ensure stack alignment
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	lea	0x80($key),$key		# size optimization
	lea	0x20+0x20($Xip),$Xip	# size optimization
	mov	0xf0-0x80($key),$rounds
	vpshufb	$Ii,$Xi,$Xi

	and	$end0,$in0
	and	%rsp,$end0
	sub	$in0,$end0
	jc	.Ldec_no_key_aliasing
	cmp	\$768,$end0
	jnc	.Ldec_no_key_aliasing
	sub	$end0,%rsp		# avoid aliasing with key
.Ldec_no_key_aliasing:

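	# Decryption hashes the ciphertext, i.e. the input, so GHASH can
	# run one iteration behind the cipher: pre-load and byte-swap the
	# first six ciphertext blocks into the stack slots that
	# _aesni_ctr32_ghash_6x consumes.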
	vmovdqu	0x50($inp),$Z3		# I[5]
	lea	($inp),$in0
	vmovdqu	0x40($inp),$Z0
	lea	-0xc0($inp,$len),$end0
	vmovdqu	0x30($inp),$Z1
	shr	\$4,$len
	xor	$ret,$ret
	vmovdqu	0x20($inp),$Z2
	vpshufb	$Ii,$Z3,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu	0x10($inp),$T2
	vpshufb	$Ii,$Z0,$Z0
	vmovdqu	($inp),$Hkey
	vpshufb	$Ii,$Z1,$Z1
	vmovdqu	$Z0,0x30(%rsp)
	vpshufb	$Ii,$Z2,$Z2
	vmovdqu	$Z1,0x40(%rsp)
	vpshufb	$Ii,$T2,$T2
	vmovdqu	$Z2,0x50(%rsp)
	vpshufb	$Ii,$Hkey,$Hkey
	vmovdqu	$T2,0x60(%rsp)
	vmovdqu	$Hkey,0x70(%rsp)

	call	_aesni_ctr32_ghash_6x

	vmovups	$inout0,-0x60($out)	# save output
	vmovups	$inout1,-0x50($out)
	vmovups	$inout2,-0x40($out)
	vmovups	$inout3,-0x30($out)
	vmovups	$inout4,-0x20($out)
	vmovups	$inout5,-0x10($out)

	vpshufb	($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu	$Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	lea	(%rax),%rsp		# restore %rsp
.Lgcm_dec_abort:
	mov	$ret,%rax		# return value
	ret
.size	aesni_gcm_decrypt,.-aesni_gcm_decrypt
___

$code.=<<___;
.type	_aesni_ctr32_6x,\@abi-omnipotent
.align	32
_aesni_ctr32_6x:
	vmovdqu	0x00-0x80($key),$Z0	# borrow $Z0 for $rndkey
	vmovdqu	0x20($const),$T2	# borrow $T2, .Lone_msb
	lea	-1($rounds),%r13
	vmovups	0x10-0x80($key),$rndkey
	lea	0x20-0x80($key),%r12
	vpxor	$Z0,$T1,$inout0
	add	\$`6<<24`,$counter
	jc	.Lhandle_ctr32_2
	vpaddb	$T2,$T1,$inout1
	vpaddb	$T2,$inout1,$inout2
	vpxor	$Z0,$inout1,$inout1
	vpaddb	$T2,$inout2,$inout3
	vpxor	$Z0,$inout2,$inout2
	vpaddb	$T2,$inout3,$inout4
	vpxor	$Z0,$inout3,$inout3
	vpaddb	$T2,$inout4,$inout5
	vpxor	$Z0,$inout4,$inout4
	vpaddb	$T2,$inout5,$T1
	vpxor	$Z0,$inout5,$inout5
	jmp	.Loop_ctr32

.align	16
.Loop_ctr32:
	vaesenc	$rndkey,$inout0,$inout0
	vaesenc	$rndkey,$inout1,$inout1
	vaesenc	$rndkey,$inout2,$inout2
	vaesenc	$rndkey,$inout3,$inout3
	vaesenc	$rndkey,$inout4,$inout4
	vaesenc	$rndkey,$inout5,$inout5
	vmovups	(%r12),$rndkey
	lea	0x10(%r12),%r12
	dec	%r13d
	jnz	.Loop_ctr32

	vmovdqu	(%r12),$Hkey		# last round key
	vaesenc	$rndkey,$inout0,$inout0
	vpxor	0x00($inp),$Hkey,$Z0
	vaesenc	$rndkey,$inout1,$inout1
	vpxor	0x10($inp),$Hkey,$Z1
	vaesenc	$rndkey,$inout2,$inout2
	vpxor	0x20($inp),$Hkey,$Z2
	vaesenc	$rndkey,$inout3,$inout3
	vpxor	0x30($inp),$Hkey,$Xi
	vaesenc	$rndkey,$inout4,$inout4
	vpxor	0x40($inp),$Hkey,$T2
	vaesenc	$rndkey,$inout5,$inout5
	vpxor	0x50($inp),$Hkey,$Hkey
	lea	0x60($inp),$inp

	vaesenclast	$Z0,$inout0,$inout0
	vaesenclast	$Z1,$inout1,$inout1
	vaesenclast	$Z2,$inout2,$inout2
	vaesenclast	$Xi,$inout3,$inout3
	vaesenclast	$T2,$inout4,$inout4
	vaesenclast	$Hkey,$inout5,$inout5
	vmovups	$inout0,0x00($out)
	vmovups	$inout1,0x10($out)
	vmovups	$inout2,0x20($out)
	vmovups	$inout3,0x30($out)
	vmovups	$inout4,0x40($out)
	vmovups	$inout5,0x50($out)
	lea	0x60($out),$out

	ret
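# Out-of-line counter fix-up for _aesni_ctr32_6x, same idea as
# .Lhandle_ctr32 above: redo the six increments with 32-bit vpaddd on
# byte-swapped counters so that carries propagate correctly.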
.align	32
.Lhandle_ctr32_2:
	vpshufb	$Ii,$T1,$Z2		# byte-swap counter
	vmovdqu	0x30($const),$Z1	# borrow $Z1, .Ltwo_lsb
	vpaddd	0x40($const),$Z2,$inout1	# .Lone_lsb
	vpaddd	$Z1,$Z2,$inout2
	vpaddd	$Z1,$inout1,$inout3
	vpshufb	$Ii,$inout1,$inout1
	vpaddd	$Z1,$inout2,$inout4
	vpshufb	$Ii,$inout2,$inout2
	vpxor	$Z0,$inout1,$inout1
	vpaddd	$Z1,$inout3,$inout5
	vpshufb	$Ii,$inout3,$inout3
	vpxor	$Z0,$inout2,$inout2
	vpaddd	$Z1,$inout4,$T1		# byte-swapped next counter value
	vpshufb	$Ii,$inout4,$inout4
	vpxor	$Z0,$inout3,$inout3
	vpshufb	$Ii,$inout5,$inout5
	vpxor	$Z0,$inout4,$inout4
	vpshufb	$Ii,$T1,$T1		# next counter value
	vpxor	$Z0,$inout5,$inout5
	jmp	.Loop_ctr32
.size	_aesni_ctr32_6x,.-_aesni_ctr32_6x

.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@function,6
.align	32
aesni_gcm_encrypt:
	xor	$ret,$ret
	cmp	\$0x60*3,$len		# minimal accepted length
	jb	.Lgcm_enc_abort

	lea	(%rsp),%rax		# save stack pointer
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
___
$code.=<<___ if ($win64);
	lea	-0xa8(%rsp),%rsp
	movaps	%xmm6,-0xd8(%rax)
	movaps	%xmm7,-0xc8(%rax)
	movaps	%xmm8,-0xb8(%rax)
	movaps	%xmm9,-0xa8(%rax)
	movaps	%xmm10,-0x98(%rax)
	movaps	%xmm11,-0x88(%rax)
	movaps	%xmm12,-0x78(%rax)
	movaps	%xmm13,-0x68(%rax)
	movaps	%xmm14,-0x58(%rax)
	movaps	%xmm15,-0x48(%rax)
.Lgcm_enc_body:
___
$code.=<<___;
	vzeroupper

	vmovdqu	($ivp),$T1		# input counter value
	add	\$-128,%rsp
	mov	12($ivp),$counter
	lea	.Lbswap_mask(%rip),$const
	lea	-0x80($key),$in0	# borrow $in0
	mov	\$0xf80,$end0		# borrow $end0
	lea	0x80($key),$key		# size optimization
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	and	\$-128,%rsp		# ensure stack alignment
	mov	0xf0-0x80($key),$rounds

	and	$end0,$in0
	and	%rsp,$end0
	sub	$in0,$end0
	jc	.Lenc_no_key_aliasing
	cmp	\$768,$end0
	jnc	.Lenc_no_key_aliasing
	sub	$end0,%rsp		# avoid aliasing with key
.Lenc_no_key_aliasing:

	lea	($out),$in0
	lea	-0xc0($out,$len),$end0
	shr	\$4,$len

	call	_aesni_ctr32_6x
	vpshufb	$Ii,$inout0,$Xi		# save bswapped output on stack
	vpshufb	$Ii,$inout1,$T2
	vmovdqu	$Xi,0x70(%rsp)
	vpshufb	$Ii,$inout2,$Z0
	vmovdqu	$T2,0x60(%rsp)
	vpshufb	$Ii,$inout3,$Z1
	vmovdqu	$Z0,0x50(%rsp)
	vpshufb	$Ii,$inout4,$Z2
	vmovdqu	$Z1,0x40(%rsp)
	vpshufb	$Ii,$inout5,$Z3		# passed to _aesni_ctr32_ghash_6x
	vmovdqu	$Z2,0x30(%rsp)

	call	_aesni_ctr32_6x

	vmovdqu	($Xip),$Xi		# load Xi
	lea	0x20+0x20($Xip),$Xip	# size optimization
	sub	\$12,$len
	mov	\$0x60*2,$ret
	vpshufb	$Ii,$Xi,$Xi

	call	_aesni_ctr32_ghash_6x
	vmovdqu	0x20(%rsp),$Z3		# I[5]
	vmovdqu	($const),$Ii		# borrow $Ii for .Lbswap_mask
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq	$Z3,$Z3,$T1
	vmovdqu	0x20-0x20($Xip),$rndkey	# borrow $rndkey for $HK
	vmovups	$inout0,-0x60($out)	# save output
	vpshufb	$Ii,$inout0,$inout0	# but keep bswapped copy
	vpxor	$Z3,$T1,$T1
	vmovups	$inout1,-0x50($out)
	vpshufb	$Ii,$inout1,$inout1
	vmovups	$inout2,-0x40($out)
	vpshufb	$Ii,$inout2,$inout2
	vmovups	$inout3,-0x30($out)
	vpshufb	$Ii,$inout3,$inout3
	vmovups	$inout4,-0x20($out)
	vpshufb	$Ii,$inout4,$inout4
	vmovups	$inout5,-0x10($out)
	vpshufb	$Ii,$inout5,$inout5
	vmovdqu	$inout0,0x10(%rsp)	# free $inout0
___
{ my ($HK,$T3)=($rndkey,$inout0);

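# The stitched loop leaves the last six ciphertext blocks unhashed
# (encryption must produce ciphertext before it can be hashed), so they
# are folded into $Xi below with stand-alone Karatsuba multiplications
# against $Hkey^1..$Hkey^6.  $HK is presumed to hold the precomputed
# xor of each power's halves, as laid out in Htbl by gcm_init_avx in
# ghash-x86_64.pl, supplying the middle Karatsuba term.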
$code.=<<___;
	vmovdqu	0x30(%rsp),$Z2		# I[4]
	vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpunpckhqdq	$Z2,$Z2,$T2
	vpclmulqdq	\$0x00,$Hkey,$Z3,$Z1
	vpxor	$Z2,$T2,$T2
	vpclmulqdq	\$0x11,$Hkey,$Z3,$Z3
	vpclmulqdq	\$0x00,$HK,$T1,$T1

	vmovdqu	0x40(%rsp),$T3		# I[3]
	vpclmulqdq	\$0x00,$Ii,$Z2,$Z0
	vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$T3,$T3,$Z1
	vpclmulqdq	\$0x11,$Ii,$Z2,$Z2
	vpxor	$T3,$Z1,$Z1
	vpxor	$Z3,$Z2,$Z2
	vpclmulqdq	\$0x10,$HK,$T2,$T2
	vmovdqu	0x50-0x20($Xip),$HK
	vpxor	$T1,$T2,$T2

	vmovdqu	0x50(%rsp),$T1		# I[2]
	vpclmulqdq	\$0x00,$Hkey,$T3,$Z3
	vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor	$Z0,$Z3,$Z3
	vpunpckhqdq	$T1,$T1,$Z0
	vpclmulqdq	\$0x11,$Hkey,$T3,$T3
	vpxor	$T1,$Z0,$Z0
	vpxor	$Z2,$T3,$T3
	vpclmulqdq	\$0x00,$HK,$Z1,$Z1
	vpxor	$T2,$Z1,$Z1

	vmovdqu	0x60(%rsp),$T2		# I[1]
	vpclmulqdq	\$0x00,$Ii,$T1,$Z2
	vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor	$Z3,$Z2,$Z2
	vpunpckhqdq	$T2,$T2,$Z3
	vpclmulqdq	\$0x11,$Ii,$T1,$T1
	vpxor	$T2,$Z3,$Z3
	vpxor	$T3,$T1,$T1
	vpclmulqdq	\$0x10,$HK,$Z0,$Z0
	vmovdqu	0x80-0x20($Xip),$HK
	vpxor	$Z1,$Z0,$Z0

	vpxor	0x70(%rsp),$Xi,$Xi	# accumulate I[0]
	vpclmulqdq	\$0x00,$Hkey,$T2,$Z1
	vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpunpckhqdq	$Xi,$Xi,$T3
	vpxor	$Z2,$Z1,$Z1
	vpclmulqdq	\$0x11,$Hkey,$T2,$T2
	vpxor	$Xi,$T3,$T3
	vpxor	$T1,$T2,$T2
	vpclmulqdq	\$0x00,$HK,$Z3,$Z3
	vpxor	$Z0,$Z3,$Z0

	vpclmulqdq	\$0x00,$Ii,$Xi,$Z2
	vmovdqu	0x00-0x20($Xip),$Hkey	# $Hkey^1
	vpunpckhqdq	$inout5,$inout5,$T1
	vpclmulqdq	\$0x11,$Ii,$Xi,$Xi
	vpxor	$inout5,$T1,$T1
	vpxor	$Z1,$Z2,$Z1
	vpclmulqdq	\$0x10,$HK,$T3,$T3
	vmovdqu	0x20-0x20($Xip),$HK
	vpxor	$T2,$Xi,$Z3
	vpxor	$Z0,$T3,$Z2

	vmovdqu	0x10-0x20($Xip),$Ii	# borrow $Ii for $Hkey^2
	vpxor	$Z1,$Z3,$T3		# aggregated Karatsuba post-processing
	vpclmulqdq	\$0x00,$Hkey,$inout5,$Z0
	vpxor	$T3,$Z2,$Z2
	vpunpckhqdq	$inout4,$inout4,$T2
	vpclmulqdq	\$0x11,$Hkey,$inout5,$inout5
	vpxor	$inout4,$T2,$T2
	vpslldq	\$8,$Z2,$T3
	vpclmulqdq	\$0x00,$HK,$T1,$T1
	vpxor	$T3,$Z1,$Xi
	vpsrldq	\$8,$Z2,$Z2
	vpxor	$Z2,$Z3,$Z3

	vpclmulqdq	\$0x00,$Ii,$inout4,$Z1
	vmovdqu	0x30-0x20($Xip),$Hkey	# $Hkey^3
	vpxor	$Z0,$Z1,$Z1
	vpunpckhqdq	$inout3,$inout3,$T3
	vpclmulqdq	\$0x11,$Ii,$inout4,$inout4
	vpxor	$inout3,$T3,$T3
	vpxor	$inout5,$inout4,$inout4
	vpalignr	\$8,$Xi,$Xi,$inout5	# 1st phase
	vpclmulqdq	\$0x10,$HK,$T2,$T2
	vmovdqu	0x50-0x20($Xip),$HK
	vpxor	$T1,$T2,$T2

	vpclmulqdq	\$0x00,$Hkey,$inout3,$Z0
	vmovdqu	0x40-0x20($Xip),$Ii	# borrow $Ii for $Hkey^4
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$inout2,$inout2,$T1
	vpclmulqdq	\$0x11,$Hkey,$inout3,$inout3
	vpxor	$inout2,$T1,$T1
	vpxor	$inout4,$inout3,$inout3
	vxorps	0x10(%rsp),$Z3,$Z3	# accumulate $inout0
	vpclmulqdq	\$0x00,$HK,$T3,$T3
	vpxor	$T2,$T3,$T3

	vpclmulqdq	\$0x10,0x10($const),$Xi,$Xi
	vxorps	$inout5,$Xi,$Xi

	vpclmulqdq	\$0x00,$Ii,$inout2,$Z1
	vmovdqu	0x60-0x20($Xip),$Hkey	# $Hkey^5
	vpxor	$Z0,$Z1,$Z1
	vpunpckhqdq	$inout1,$inout1,$T2
	vpclmulqdq	\$0x11,$Ii,$inout2,$inout2
	vpxor	$inout1,$T2,$T2
	vpalignr	\$8,$Xi,$Xi,$inout5	# 2nd phase
	vpxor	$inout3,$inout2,$inout2
	vpclmulqdq	\$0x10,$HK,$T1,$T1
	vmovdqu	0x80-0x20($Xip),$HK
	vpxor	$T3,$T1,$T1

	vxorps	$Z3,$inout5,$inout5
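	# second phase of the reduction: multiply by the .Lpoly constant
	# once more and fold in $inout5, which holds the rotated first
	# result xor'ed with the accumulated high half $Z3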
	vpclmulqdq	\$0x10,0x10($const),$Xi,$Xi
	vxorps	$inout5,$Xi,$Xi

	vpclmulqdq	\$0x00,$Hkey,$inout1,$Z0
	vmovdqu	0x70-0x20($Xip),$Ii	# borrow $Ii for $Hkey^6
	vpxor	$Z1,$Z0,$Z0
	vpunpckhqdq	$Xi,$Xi,$T3
	vpclmulqdq	\$0x11,$Hkey,$inout1,$inout1
	vpxor	$Xi,$T3,$T3
	vpxor	$inout2,$inout1,$inout1
	vpclmulqdq	\$0x00,$HK,$T2,$T2
	vpxor	$T1,$T2,$T2

	vpclmulqdq	\$0x00,$Ii,$Xi,$Z1
	vpclmulqdq	\$0x11,$Ii,$Xi,$Z3
	vpxor	$Z0,$Z1,$Z1
	vpclmulqdq	\$0x10,$HK,$T3,$Z2
	vpxor	$inout1,$Z3,$Z3
	vpxor	$T2,$Z2,$Z2

	vpxor	$Z1,$Z3,$Z0		# aggregated Karatsuba post-processing
	vpxor	$Z0,$Z2,$Z2
	vpslldq	\$8,$Z2,$T1
	vmovdqu	0x10($const),$Hkey	# .Lpoly
	vpsrldq	\$8,$Z2,$Z2
	vpxor	$T1,$Z1,$Xi
	vpxor	$Z2,$Z3,$Z3

	vpalignr	\$8,$Xi,$Xi,$T2	# 1st phase
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Xi
	vpxor	$T2,$Xi,$Xi

	vpalignr	\$8,$Xi,$Xi,$T2	# 2nd phase
	vpclmulqdq	\$0x10,$Hkey,$Xi,$Xi
	vpxor	$Z3,$T2,$T2
	vpxor	$T2,$Xi,$Xi
___
}
$code.=<<___;
	vpshufb	($const),$Xi,$Xi	# .Lbswap_mask
	vmovdqu	$Xi,-0x40($Xip)		# output Xi

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	-0xd8(%rax),%xmm6
	movaps	-0xc8(%rax),%xmm7
	movaps	-0xb8(%rax),%xmm8
	movaps	-0xa8(%rax),%xmm9
	movaps	-0x98(%rax),%xmm10
	movaps	-0x88(%rax),%xmm11
	movaps	-0x78(%rax),%xmm12
	movaps	-0x68(%rax),%xmm13
	movaps	-0x58(%rax),%xmm14
	movaps	-0x48(%rax),%xmm15
___
$code.=<<___;
	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	lea	(%rax),%rsp		# restore %rsp
.Lgcm_enc_abort:
	mov	$ret,%rax		# return value
	ret
.size	aesni_gcm_encrypt,.-aesni_gcm_encrypt
___

$code.=<<___;
.align	64
.Lbswap_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.Lone_msb:
	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
.Ltwo_lsb:
	.byte	2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.Lone_lsb:
	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.asciz	"AES-NI GCM module for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___
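# Win64 structured exception handling: gcm_se_handler lets the unwinder
# recover the saved GPRs and the %xmm6-%xmm15 save area when an exception
# unwinds through aesni_gcm_encrypt/aesni_gcm_decrypt, as registered in
# the .pdata/.xdata sections below.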
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	gcm_se_handler,\@abi-omnipotent
.align	16
gcm_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	120($context),%rax	# pull context->Rax

	mov	-48(%rax),%r15
	mov	-40(%rax),%r14
	mov	-32(%rax),%r13
	mov	-24(%rax),%r12
	mov	-16(%rax),%rbp
	mov	-8(%rax),%rbx
	mov	%r15,240($context)
	mov	%r14,232($context)
	mov	%r13,224($context)
	mov	%r12,216($context)
	mov	%rbp,160($context)
	mov	%rbx,144($context)

	lea	-0xd8(%rax),%rsi	# %xmm save area
	lea	512($context),%rdi	# & context.Xmm6
	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
	.long	0xa548f3fc		# cld; rep movsq

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	gcm_se_handler,.-gcm_se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_aesni_gcm_decrypt
	.rva	.LSEH_end_aesni_gcm_decrypt
	.rva	.LSEH_gcm_dec_info

	.rva	.LSEH_begin_aesni_gcm_encrypt
	.rva	.LSEH_end_aesni_gcm_encrypt
	.rva	.LSEH_gcm_enc_info
.section	.xdata
.align	8
.LSEH_gcm_dec_info:
	.byte	9,0,0,0
	.rva	gcm_se_handler
	.rva	.Lgcm_dec_body,.Lgcm_dec_abort
.LSEH_gcm_enc_info:
	.byte	9,0,0,0
	.rva	gcm_se_handler
	.rva	.Lgcm_enc_body,.Lgcm_enc_abort
___
}
}}} else {{{
$code=<<___;	# assembler is too old
.text

.globl	aesni_gcm_encrypt
.type	aesni_gcm_encrypt,\@abi-omnipotent
aesni_gcm_encrypt:
	xor	%eax,%eax
	ret
.size	aesni_gcm_encrypt,.-aesni_gcm_encrypt

.globl	aesni_gcm_decrypt
.type	aesni_gcm_decrypt,\@abi-omnipotent
aesni_gcm_decrypt:
	xor	%eax,%eax
	ret
.size	aesni_gcm_decrypt,.-aesni_gcm_decrypt
___
}}}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;