#!/usr/bin/env perl

# ====================================================================
# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# "[Re]written" was achieved in two major overhauls. In 2004 the BODY_*
# functions were re-implemented to address a P4 performance issue [see
# commentary below], and in 2006 the rest was rewritten in order to
# gain freedom to liberate the licensing terms.

# January, September 2004.
#
# It was noted that the Intel IA-32 C compiler generates code which
# performs ~30% *faster* on the P4 CPU than the original *hand-coded*
# SHA1 assembler implementation. To address this problem (and
# prove that humans are still better than machines:-), the
# original code was overhauled, which resulted in the following
# performance changes:
#
#                 compared with original  compared with Intel cc
#                 assembler impl.         generated code
# Pentium         -16%                    +48%
# PIII/AMD        +8%                     +16%
# P4              +85%(!)                 +45%
#
# As you can see, Pentium came out as the loser:-( Yet I reckoned that
# the improvement on P4 outweighs the loss and incorporated this
# re-tuned code into 0.9.7 and later.
# ----------------------------------------------------------------
#                                   <appro@fy.chalmers.se>

# August 2009.
#
# George Spelvin tipped me off that F_40_59(b,c,d) can be rewritten as
# '(c&d) + (b&(c^d))', which makes it possible to accumulate partial
# results and lighten "pressure" on scratch registers. This resulted in
# >12% performance improvement on contemporary AMD cores (with no
# degradation on other CPUs:-). Also, the code was revised to maximize
# the "distance" between instructions producing input to the 'lea'
# instruction and the 'lea' instruction itself, which is essential for
# the Intel Atom core and resulted in ~15% improvement.
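# The identity above is easy to convince oneself of: for every bit position
# either c==d, in which case b&(c^d) is 0 and c&d supplies the majority bit,
# or c!=d, in which case c&d is 0 and b decides. The two terms therefore
# never carry into each other, so '+' can stand in for '|'. The sub below is
# an illustrative, self-contained check of that claim; it is never called,
# so it has no effect on the generated assembly.
sub _check_f40_59_identity {
    for my $b (0,1) { for my $c (0,1) { for my $d (0,1) {
        my $maj = ($b&$c)|($b&$d)|($c&$d);      # F_40_59 a.k.a. Maj(b,c,d)
        die "F_40_59 identity broken"
            unless (($c&$d) + ($b&($c^$d))) == $maj;
    }}}
    return 1;
}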
# October 2010.
#
# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
# is to offload the message schedule, denoted by Wt in the NIST
# specification, or Xupdate in the OpenSSL source, to the SIMD unit.
# The idea is not novel, and in the SSE2 context was first explored by
# Dean Gaudet in 2004, see http://arctic.org/~dean/crypto/sha1.html.
# Since then several things have changed that made it interesting again:
#
# a) XMM units became faster and wider;
# b) the instruction set became more versatile;
# c) an important observation was made by Max Locktyukhin, which made
#    it possible to reduce the number of instructions required to
#    perform the operation in question, for further details see
#    http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.

# April 2011.
#
# Add AVX code path, probably the most controversial one... The thing is
# that the switch to AVX alone improves performance by as little as 4% in
# comparison to the SSSE3 code path. But the result below doesn't look
# like a 4% improvement... The trouble is that Sandy Bridge decodes
# 'ro[rl]' as a pair of µ-ops, and it's the additional µ-ops, two per
# round, that make it run slower than Core2 and Westmere. But 'sh[rl]d'
# is decoded as a single µ-op by Sandy Bridge, and it's replacing
# 'ro[rl]' with the equivalent 'sh[rl]d' that is responsible for the
# impressive 5.1 cycles per processed byte. On the other hand, 'sh[rl]d'
# is not something that used to be fast, nor does it appear to be fast
# on the upcoming Bulldozer [according to its optimization manual].
# Which is why the AVX code path is guarded by *both* the AVX bit and a
# synthetic bit denoting Intel CPUs. One can argue that it's unfair to
# AMD, but without 'sh[rl]d' it makes no sense to keep the AVX code
# path. If somebody feels that strongly, it's probably more appropriate
# to discuss the possibility of using the XOP vector rotate on AMD...

# March 2014.
#
# Add support for Intel SHA Extensions.

######################################################################
# Current performance is summarized in the following table. Numbers are
# CPU clock cycles spent to process a single byte (less is better).
#
#               x86     SSSE3           AVX
# Pentium       15.7    -
# PIII          11.5    -
# P4            10.6    -
# AMD K8        7.1     -
# Core2         7.3     6.0/+22%        -
# Westmere      7.3     5.5/+33%        -
# Sandy Bridge  8.8     6.2/+40%        5.1(**)/+73%
# Ivy Bridge    7.2     4.8/+51%        4.7(**)/+53%
# Haswell       6.5     4.3/+51%        4.1(**)/+58%
# Bulldozer     11.6    6.0/+92%
# VIA Nano      10.6    7.5/+41%
# Atom          12.5    9.3(*)/+35%
# Silvermont    14.5    9.9(*)/+46%
#
# (*)   The loop is 1056 instructions long and the expected result is
#       ~8.25. The discrepancy is because of front-end limitations, the
#       so-called MS-ROM penalties, and on Silvermont also the rotate's
#       limited parallelism.
#
# (**)  As per the comment above, the result is for AVX *plus* sh[rl]d.

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");

$xmm=$ymm=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
$ymm = 1;

$ymm = 0 unless ($xmm);

$shaext=$xmm;   ### set to zero if compiling for 1.0.1

# TODO(davidben): Consider enabling the Intel SHA Extensions code once it's
# been tested.
$shaext = 0;

&external_label("OPENSSL_ia32cap_P") if ($xmm);


$A="eax";
$B="ebx";
$C="ecx";
$D="edx";
$E="edi";
$T="esi";
$tmp1="ebp";

@V=($A,$B,$C,$D,$E,$T);

$alt=0; # 1 denotes alternative IALU implementation, which performs
        # 8% *worse* on P4, same on Westmere and Atom, 2% better on
        # Sandy Bridge...

sub BODY_00_15
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("00_15 $n");

	&mov($f,$c);			# f to hold F_00_19(b,c,d)
	 if ($n==0)  { &mov($tmp1,$a); }
	 else        { &mov($a,$tmp1); }
	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
	 &xor($f,$d);
	&add($tmp1,$e);			# tmp1+=e;
	 &mov($e,&swtmp($n%16));	# e becomes volatile and is loaded
					# with xi, also note that e becomes
					# f in next round...
	 &and($f,$b);
	&rotr($b,2);			# b=ROTATE(b,30)
	 &xor($f,$d);			# f holds F_00_19(b,c,d)
	&lea($tmp1,&DWP(0x5a827999,$tmp1,$e));	# tmp1+=K_00_19+xi

	if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
		      &add($f,$tmp1); }	# f+=tmp1
	else        { &add($tmp1,$f); }	# f becomes a in next round
	&mov($tmp1,$a)			if ($alt && $n==15);
	}

sub BODY_16_19
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("16_19 $n");

if ($alt) {
	&xor($c,$d);
	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&and($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d), b&=c^d
	 &xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	 &add($e,$tmp1);		# e+=F_00_19(b,c,d)
	&xor($c,$d);			# restore $c
	 &mov($tmp1,$a);		# b in next round
	&rotr($b,$n==16?2:7);		# b=ROTATE(b,30)
	 &mov(&swtmp($n%16),$f);	# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	 &add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	 &xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	 &xor($tmp1,$d);		# tmp1=F_00_19(b,c,d)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	 &mov($tmp1,$a);
	&rotr($b,2);			# b=ROTATE(b,30)
	 &mov(&swtmp($n%16),$f);	# xi=f
	&rotl($tmp1,5);			# ROTATE(a,5)
	 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	 &add($f,$tmp1);		# f+=ROTATE(a,5)
}
	}

sub BODY_20_39
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;
	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;

	&comment("20_39 $n");

if ($alt) {
	&xor($tmp1,$c);			# tmp1 to hold F_20_39(b,c,d), b^=c
	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	 &xor($f,&swtmp(($n+8)%16));
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	 &mov($tmp1,$a);		# b in next round
	&rotr($b,7);			# b=ROTATE(b,30)
	 &mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	 &xor($b,$c)			if($n==39);# warm up for BODY_40_59
	&and($tmp1,$b)			if($n==39);
	 &lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	 &add($f,$a);			# f+=ROTATE(a,5)
	&rotr($a,5)			if ($n==79);
} else {
	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$c);
	 &xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	 &add($e,$tmp1);		# e+=F_20_39(b,c,d)
	&rotr($b,2);			# b=ROTATE(b,30)
	 &mov($tmp1,$a);
	&rotl($tmp1,5);			# ROTATE(a,5)
	 &mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	 &mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_40_59
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("40_59 $n");

if ($alt) {
	&add($e,$tmp1);			# e+=b&(c^d)
	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&mov($tmp1,$d);
	 &xor($f,&swtmp(($n+8)%16));
	&xor($c,$d);			# restore $c
	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	 &and($tmp1,$c);
	&rotr($b,7);			# b=ROTATE(b,30)
	 &add($e,$tmp1);		# e+=c&d
	&mov($tmp1,$a);			# b in next round
	 &mov(&swtmp($n%16),$f);	# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	 &xor($b,$c)			if ($n<59);
	&and($tmp1,$b)			if ($n<59);# tmp1 to hold F_40_59(b,c,d)
	 &lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	 &add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	 &xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	 &add($tmp1,$e);		# b&(c^d)+=e
	&rotr($b,2);			# b=ROTATE(b,30)
	 &mov($e,$a);			# e becomes volatile
	&rotl($e,5);			# ROTATE(a,5)
	 &mov(&swtmp($n%16),$f);	# xi=f
	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
	 &mov($tmp1,$c);
	&add($f,$e);			# f+=ROTATE(a,5)
	 &and($tmp1,$d);
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	 &add($f,$tmp1);		# f+=c&d
}
	}

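# For orientation, a minimal scalar sketch of what a single round generated
# by the BODY_* subs above computes (shown for rounds 0..19; the other
# ranges differ only in the F function and the K constant). Helper names
# are illustrative only, and the subs are never called, so nothing extra is
# emitted into the assembly output.
sub _rotl32_ref { my ($v,$n)=@_; return (($v<<$n)|($v>>(32-$n))) & 0xffffffff; }
sub _sha1_round_00_19_ref {
	my ($a,$b,$c,$d,$e,$xi) = @_;		# $xi is X[n], already byte-swapped
	my $f = (($c^$d)&$b)^$d;		# F_00_19(b,c,d)
	my $t = (&_rotl32_ref($a,5)+$f+$e+$xi+0x5a827999) & 0xffffffff;
	return ($t,$a,&_rotl32_ref($b,30),$c,$d);	# new (a,b,c,d,e)
}
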
&function_begin("sha1_block_data_order");
if ($xmm) {
  &static_label("shaext_shortcut")	if ($shaext);
  &static_label("ssse3_shortcut");
  &static_label("avx_shortcut")		if ($ymm);
  &static_label("K_XX_XX");

	&call	(&label("pic_point"));	# make it PIC!
  &set_label("pic_point");
	&blindpop($tmp1);
	&picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));

	&mov	($A,&DWP(0,$T));
	&mov	($D,&DWP(4,$T));
	&test	($D,1<<9);		# check SSSE3 bit
	&jz	(&label("x86"));
	&mov	($C,&DWP(8,$T));
	&test	($A,1<<24);		# check FXSR bit
	&jz	(&label("x86"));
	if ($shaext) {
		&test	($C,1<<29);		# check SHA bit
		&jnz	(&label("shaext_shortcut"));
	}
	if ($ymm) {
		&and	($D,1<<28);		# mask AVX bit
		&and	($A,1<<30);		# mask "Intel CPU" bit
		&or	($A,$D);
		&cmp	($A,1<<28|1<<30);
		&je	(&label("avx_shortcut"));
	}
	&jmp	(&label("ssse3_shortcut"));
  &set_label("x86",16);
}
	&mov($tmp1,&wparam(0));	# SHA_CTX *c
	&mov($T,&wparam(1));	# const void *input
	&mov($A,&wparam(2));	# size_t num
	&stack_push(16+3);	# allocate X[16]
	&shl($A,6);
	&add($A,$T);
	&mov(&wparam(2),$A);	# pointer beyond the end of input
	&mov($E,&DWP(16,$tmp1));# pre-load E
	&jmp(&label("loop"));

&set_label("loop",16);

	# copy input chunk to X, but reversing byte order!
	for ($i=0; $i<16; $i+=4)
		{
		&mov($A,&DWP(4*($i+0),$T));
		&mov($B,&DWP(4*($i+1),$T));
		&mov($C,&DWP(4*($i+2),$T));
		&mov($D,&DWP(4*($i+3),$T));
		&bswap($A);
		&bswap($B);
		&bswap($C);
		&bswap($D);
		&mov(&swtmp($i+0),$A);
		&mov(&swtmp($i+1),$B);
		&mov(&swtmp($i+2),$C);
		&mov(&swtmp($i+3),$D);
		}
	&mov(&wparam(1),$T);	# redundant in 1st spin

	&mov($A,&DWP(0,$tmp1));	# load SHA_CTX
	&mov($B,&DWP(4,$tmp1));
	&mov($C,&DWP(8,$tmp1));
	&mov($D,&DWP(12,$tmp1));
	# E is pre-loaded

	for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
	for(;$i<20;$i++)	{ &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
	for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
	for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
	for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }

	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check

	&mov($tmp1,&wparam(0));	# re-load SHA_CTX*
	&mov($D,&wparam(1));	# D is last "T" and is discarded

	&add($E,&DWP(0,$tmp1));	# E is last "A"...
	&add($T,&DWP(4,$tmp1));
	&add($A,&DWP(8,$tmp1));
	&add($B,&DWP(12,$tmp1));
	&add($C,&DWP(16,$tmp1));

	&mov(&DWP(0,$tmp1),$E);	# update SHA_CTX
	 &add($D,64);		# advance input pointer
	&mov(&DWP(4,$tmp1),$T);
	 &cmp($D,&wparam(2));	# have we reached the end yet?
	&mov(&DWP(8,$tmp1),$A);
	 &mov($E,$C);		# C is last "E" which needs to be "pre-loaded"
	&mov(&DWP(12,$tmp1),$B);
	 &mov($T,$D);		# input pointer
	&mov(&DWP(16,$tmp1),$C);
	&jb(&label("loop"));

	&stack_pop(16+3);
&function_end("sha1_block_data_order");

if ($xmm) {
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA1 update function.
#
my ($ctx,$inp,$num)=("edi","esi","ecx");
my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
my @MSG=map("xmm$_",(4..7));

sub sha1rnds4 {
 my ($dst,$src,$imm)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm);	}
}
sub sha1op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha1nexte	{ sha1op38(0xc8,@_); }
sub sha1msg1	{ sha1op38(0xc9,@_); }
sub sha1msg2	{ sha1op38(0xca,@_); }

&function_begin("_sha1_block_data_order_shaext");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("shaext_shortcut");
	&mov	($ctx,&wparam(0));
	&mov	("ebx","esp");
	&mov	($inp,&wparam(1));
	&mov	($num,&wparam(2));
	&sub	("esp",32);

	&movdqu	($ABCD,&QWP(0,$ctx));
	&movd	($E,&DWP(16,$ctx));
	&and	("esp",-32);
	&movdqa	($BSWAP,&QWP(0x50,$tmp1));	# byte-n-word swap

	&movdqu	(@MSG[0],&QWP(0,$inp));
	&pshufd	($ABCD,$ABCD,0b00011011);	# flip word order
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufd	($E,$E,0b00011011);		# flip word order
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[0],$BSWAP);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[1],$BSWAP);
	&pshufb	(@MSG[2],$BSWAP);
	&pshufb	(@MSG[3],$BSWAP);
	&jmp	(&label("loop_shaext"));

&set_label("loop_shaext",16);
	&dec	($num);
	&lea	("eax",&DWP(0x40,$inp));
	&movdqa	(&QWP(0,"esp"),$E);	# offload $E
	&paddd	($E,@MSG[0]);
	&cmovne	($inp,"eax");
	&movdqa	(&QWP(16,"esp"),$ABCD);	# offload $ABCD

for($i=0;$i<20-4;$i+=2) {
	&sha1msg1	(@MSG[0],@MSG[1]);
	&movdqa		($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,int($i/5));	# 0-3...
	&sha1nexte	($E_,@MSG[1]);
	&pxor		(@MSG[0],@MSG[2]);
	&sha1msg1	(@MSG[1],@MSG[2]);
	&sha1msg2	(@MSG[0],@MSG[3]);

	&movdqa		($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,int(($i+1)/5));
	&sha1nexte	($E,@MSG[2]);
	&pxor		(@MSG[1],@MSG[3]);
	&sha1msg2	(@MSG[1],@MSG[0]);

	push(@MSG,shift(@MSG));	push(@MSG,shift(@MSG));
}
	&movdqu	(@MSG[0],&QWP(0,$inp));
	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);	# 64-67
	&sha1nexte	($E_,@MSG[1]);
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufb	(@MSG[0],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);	# 68-71
	&sha1nexte	($E,@MSG[2]);
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[1],$BSWAP);

	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);	# 72-75
	&sha1nexte	($E_,@MSG[3]);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[2],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);	# 76-79
	&movdqa	($E_,&QWP(0,"esp"));
	&pshufb	(@MSG[3],$BSWAP);
	&sha1nexte	($E,$E_);
	&paddd	($ABCD,&QWP(16,"esp"));

	&jnz	(&label("loop_shaext"));

	&pshufd	($ABCD,$ABCD,0b00011011);
	&pshufd	($E,$E,0b00011011);
	&movdqu	(&QWP(0,$ctx),$ABCD);
	&movd	(&DWP(16,$ctx),$E);
	&mov	("esp","ebx");
&function_end("_sha1_block_data_order_shaext");
}
######################################################################
# The SSSE3 implementation.
#
# %xmm[0-7] are used as a ring @X[] buffer containing quadruples of the
# last 32 elements of the message schedule or Xupdate outputs. The first
# 4 quadruples are simply byte-swapped input, the next 4 are calculated
# according to the method originally suggested by Dean Gaudet (modulo
# being implemented in SSSE3). Once 8 quadruples or 32 elements are
# collected, it switches to the routine proposed by Max Locktyukhin.
#
# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to a 3-quadruple ring
# buffer on the stack. Keep in mind that X[2] is an alias for X[-6],
# X[3] for X[-5], and X[4] for X[-4]...
#
# Another notable optimization is aggressive stack frame compression
# aiming to minimize the number of 9-byte instructions...
#
# Yet another notable optimization is the "jumping" $B variable. It
# means that there is no register permanently allocated for the $B
# value. This made it possible to eliminate one instruction from
# body_20_39...
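#
# For reference, the recurrence that the XMM code below vectorizes four
# elements at a time is the plain NIST one,
# W[t] = ROTATE(W[t-3]^W[t-8]^W[t-14]^W[t-16],1); per the comments in
# Xupdate_ssse3_32_79, from the 32nd element onward the equivalent form
# W[t] = ROTATE(W[t-6]^W[t-16]^W[t-28]^W[t-32],2) is used, which is free
# of the W[t-3] dependency inside a quadruple. The scalar sketch below is
# illustrative only and never called.
sub _xupdate_ref {
	my @W = @_;				# W[0..15], already byte-swapped
	for (my $t=16; $t<80; $t++) {
		my $x = $W[$t-3]^$W[$t-8]^$W[$t-14]^$W[$t-16];
		$W[$t] = (($x<<1)|($x>>31)) & 0xffffffff;
	}
	return @W;
}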
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };

&function_begin("_sha1_block_data_order_ssse3");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("ssse3_shortcut");

	&movdqa	(@X[3],&QWP(0,$tmp1));		# K_00_19
	&movdqa	(@X[4],&QWP(16,$tmp1));		# K_20_39
	&movdqa	(@X[5],&QWP(32,$tmp1));		# K_40_59
	&movdqa	(@X[6],&QWP(48,$tmp1));		# K_60_79
	&movdqa	(@X[2],&QWP(64,$tmp1));		# pbswap mask

	&mov	($E,&wparam(0));		# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&movdqa	(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&movdqa	(&QWP(112+16,"esp"),@X[5]);
	&movdqa	(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&movdqa	(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&movdqa	(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));		# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);			# magic seed

	&movdqu	(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&movdqu	(@X[-3&7],&QWP(-48,$inp));
	&movdqu	(@X[-2&7],&QWP(-32,$inp));
	&movdqu	(@X[-1&7],&QWP(-16,$inp));
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&pshufb	(@X[-3&7],@X[2]);
	&pshufb	(@X[-2&7],@X[2]);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&pshufb	(@X[-1&7],@X[2]);
	&paddd	(@X[-4&7],@X[3]);		# add K_00_19
	&paddd	(@X[-3&7],@X[3]);
	&paddd	(@X[-2&7],@X[3]);
	&movdqa	(&QWP(0,"esp"),@X[-4&7]);	# X[]+K xfer to IALU
	&psubd	(@X[-4&7],@X[3]);		# restore X[]
	&movdqa	(&QWP(0+16,"esp"),@X[-3&7]);
	&psubd	(@X[-3&7],@X[3]);
	&movdqa	(&QWP(0+32,"esp"),@X[-2&7]);
	&mov	(@T[1],$C);
	&psubd	(@X[-2&7],@X[3]);
	&xor	(@T[1],$D);
	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa	(@X[0],@X[-3&7]);
	&and	(@T[0],@T[1]);
	&jmp	(&label("loop"));

######################################################################
# The SSE instruction sequence is first broken into groups of independent
# instructions, independent with respect to their inputs and shifter
# (not all architectures have more than one). Then IALU instructions
# are "knitted in" between the SSE groups. The distance is maintained
# for an SSE latency of 2 in the hope that it fits the upcoming AMD
# Bulldozer better [which allegedly also implements SSSE3]...
#
# Temporary register usage. X[2] is volatile at the entry and at the
# end is restored from the backtrace ring buffer. X[3] is expected to
# contain the current K_XX_XX constant and is used to calculate X[-1]+K
# from the previous round; it becomes volatile the moment the value is
# saved to the stack for transfer to the IALU. X[4] becomes volatile
# whenever X[-4] is accumulated and offloaded to the backtrace ring
# buffer; at the end it is loaded with the next K_XX_XX [which becomes
# X[3] in the next round]...
#
sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns));
	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
	&movdqa	(@X[2],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &paddd	(@X[3],@X[-1&7]);
	  &movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	&psrldq	(@X[2],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror

	&pxor	(@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	&movdqa	(@X[4],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&movdqa	(@X[2],@X[0]);
	 eval(shift(@insns));

	&pslldq	(@X[4],12);		# "X[0]"<<96, extract one dword
	&paddd	(@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@X[2],31);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&movdqa	(@X[3],@X[4]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@X[4],30);
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&por	(@X[0],@X[2]);		# "X[0]"<<<=1
	 eval(shift(@insns));
	  &movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pslld	(@X[3],2);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&pxor	(@X[0],@X[4]);
	  &movdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)<<<2
	&pshufd	(@X[1],@X[-3&7],0xee)	if ($Xi<7);	# was &movdqa	(@X[1],@X[-2&7])
	&pshufd	(@X[3],@X[-1&7],0xee)	if ($Xi==7);
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xupdate_ssse3_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# body_20_39
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	&punpcklqdq(@X[2],@X[-1&7]);	# compose "X[-6]", was &palignr(@X[2],@X[-2&7],8)
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	  &movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
	if ($Xi%5) {
	  &movdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &movdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	 eval(shift(@insns));		# ror
	  &paddd	(@X[3],@X[-1&7]);
	 eval(shift(@insns));

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&movdqa	(@X[2],@X[0]);
	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);

	&pslld	(@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&psrld	(@X[2],30);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns))		if (@insns[1] =~ /_rol/);
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);

	&por	(@X[0],@X[2]);		# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	  &movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&pshufd	(@X[3],@X[-1],0xee)	if ($Xi<19);	# was &movdqa	(@X[3],@X[0])
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_ssse3_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	  &paddd	(@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }		# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&movdqa	(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&movdqa	(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&movdqu	(@X[-4&7],&QWP(0,$inp));	# load input
	&movdqu	(@X[-3&7],&QWP(16,$inp));
	&movdqu	(@X[-2&7],&QWP(32,$inp));
	&movdqu	(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

  $Xi=0;
}

sub Xloop_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&pshufb	(@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddd	(@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&movdqa	(&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&psubd	(@X[($Xi-4)&7],@X[3]);

	foreach (@insns) { eval; }
  $Xi++;
}

sub Xtail_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

sub body_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(c^d)&b
	return &body_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&$_ror	($b,$j?7:2);',	# $b>>>2
	'&xor	(@T[0],$d);',
	'&mov	(@T[1],$a);',	# $b in next round

	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	($b,$c);',	# $c^$d for next round

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&and	(@T[1],$b);',	# ($b&($c^$d)) for next round

	'&xor	($b,$c);',	# restore $b
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_20_39 () {	# b^d^c
	# on entry @T[0]=b^d
	return &body_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	(@T[0],$d)	if($j==19);'.
	'&xor	(@T[0],$c)	if($j> 19);',	# ($b^$d^$c)
	'&mov	(@T[1],$a);',	# $b in next round

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c)	if ($j< 79);',	# $b^$d for next round

	'&$_ror	($b,7);',	# $b>>>2
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_40_59 () {	# ((b^c)&(c^d))^c
	# on entry @T[0]=(b^c), (c^=d)
	$rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&and	(@T[0],$c)	if ($j>=40);',	# (b^c)&(c^d)
	'&xor	($c,$d)		if ($j>=40);',	# restore $c

	'&$_ror	($b,7);',	# $b>>>2
	'&mov	(@T[1],$a);',	# $b for next round
	'&xor	(@T[0],$c);',

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c)	if ($j==59);'.
	'&xor	(@T[1],$b)	if ($j< 59);',	# b^c for next round

	'&xor	($b,$c)		if ($j< 59);',	# c^d for next round
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
######
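# The boolean forms used above are the textbook SHA-1 round functions in
# disguise: ((c^d)&b)^d is Ch(b,c,d) and ((b^c)&(c^d))^c is Maj(b,c,d),
# which is what makes the "for next round" @T[0] invariants work.
# Illustrative, never-called bit-level check:
sub _check_body_identities {
	for my $b (0,1) { for my $c (0,1) { for my $d (0,1) {
		die "Ch"  unless ((($c^$d)&$b)^$d) == (($b&$c)|((1^$b)&$d));
		die "Maj" unless (((($b^$c)&($c^$d))^$c))
				 == (($b&$c)|($b&$d)|($c&$d));
	}}}
	return 1;
}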
sub bodyx_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(b&c)^(~b&d), $e+=X[]+K
	return &bodyx_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	($b,$b,2)			if ($j==0);'.	# $b>>>2
	'&rorx	($b,@T[1],7)			if ($j!=0);',	# $b>>>2
	'&lea	($e,&DWP(0,$e,@T[0]));',
	'&rorx	(@T[0],$a,5);',

	'&andn	(@T[1],$a,$c);',
	'&and	($a,$b)',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"));',	# X[]+K xfer

	'&xor	(@T[1],$a)',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_20_39 () {	# b^d^c
	# on start $b=b^c^d
	return &bodyx_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&add	($e,($j==19?@T[0]:$b))',
	'&rorx	($b,@T[1],7);',	# $b>>>2
	'&rorx	(@T[0],$a,5);',

	'&xor	($a,$b)				if ($j<79);',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))	if ($j<79);',	# X[]+K xfer
	'&xor	($a,$c)				if ($j<79);',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_40_59 () {	# ((b^c)&(c^d))^c
	# on start $b=((b^c)&(c^d))^c
	return &bodyx_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	(@T[0],$a,5)',
	'&lea	($e,&DWP(0,$e,$b))',
	'&rorx	($b,@T[1],7)',	# $b>>>2
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))',	# X[]+K xfer

	'&mov	(@T[1],$c)',
	'&xor	($a,$b)',	# b^c for next round
	'&xor	(@T[1],$b)',	# c^d for next round

	'&and	($a,@T[1])',
	'&add	($e,@T[0])',
	'&xor	($a,$b)'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

&set_label("loop",16);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"

				$saved_j=$j; @saved_V=@V;

	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	($B,$C);
	&mov	(&DWP(12,@T[1]),$D);
	&xor	($B,$D);
	&mov	(&DWP(16,@T[1]),$E);
	&mov	(@T[1],@T[0]);
	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa	(@X[0],@X[-3&7]);
	&and	(@T[0],$B);
	&mov	($B,$T[1]);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

&function_end("_sha1_block_data_order_ssse3");

$rx=0;	# reset

if ($ymm) {
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };
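# In the AVX path 'rol'/'ror' are expressed as 'shld'/'shrd' (see the April
# 2011 note at the top): '@_[0],@_' prepends a second copy of the
# destination register, and shld/shrd with both operands being the same
# register is a rotate, which Sandy Bridge decodes as a single µ-op.
# Illustrative, never-called sketch of the argument expansion only:
sub _show_shld_expansion {
	my $demo = sub { return join(",",@_[0],@_) };	# same idiom as $_rol
	return $demo->("eax",5);			# yields "eax,eax,5"
}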
&function_begin("_sha1_block_data_order_avx");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("avx_shortcut");
	&vzeroall();

	&vmovdqa(@X[3],&QWP(0,$tmp1));		# K_00_19
	&vmovdqa(@X[4],&QWP(16,$tmp1));		# K_20_39
	&vmovdqa(@X[5],&QWP(32,$tmp1));		# K_40_59
	&vmovdqa(@X[6],&QWP(48,$tmp1));		# K_60_79
	&vmovdqa(@X[2],&QWP(64,$tmp1));		# pbswap mask

	&mov	($E,&wparam(0));		# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&vmovdqa(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&vmovdqa(&QWP(112+16,"esp"),@X[5]);
	&vmovdqa(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&vmovdqa(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&vmovdqa(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));		# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);			# magic seed

	&vmovdqu(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&vmovdqu(@X[-3&7],&QWP(-48,$inp));
	&vmovdqu(@X[-2&7],&QWP(-32,$inp));
	&vmovdqu(@X[-1&7],&QWP(-16,$inp));
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&vpshufb(@X[-3&7],@X[-3&7],@X[2]);
	&vpshufb(@X[-2&7],@X[-2&7],@X[2]);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&vpshufb(@X[-1&7],@X[-1&7],@X[2]);
	&vpaddd	(@X[0],@X[-4&7],@X[3]);		# add K_00_19
	&vpaddd	(@X[1],@X[-3&7],@X[3]);
	&vpaddd	(@X[2],@X[-2&7],@X[3]);
	&vmovdqa(&QWP(0,"esp"),@X[0]);		# X[]+K xfer to IALU
	&mov	(@T[1],$C);
	&vmovdqa(&QWP(0+16,"esp"),@X[1]);
	&xor	(@T[1],$D);
	&vmovdqa(&QWP(0+32,"esp"),@X[2]);
	&and	(@T[0],@T[1]);
	&jmp	(&label("loop"));

sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
	  &vmovdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpsrldq(@X[2],@X[-1&7],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));

"X[-3]"^"X[-8]" 1208 eval(shift(@insns)); 1209 eval(shift(@insns)); 1210 &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU 1211 eval(shift(@insns)); 1212 eval(shift(@insns)); 1213 1214 &vpxor (@X[0],@X[0],@X[2]); # "X[0]"^="X[-3]"^"X[-8]" 1215 eval(shift(@insns)); 1216 eval(shift(@insns)); 1217 eval(shift(@insns)); 1218 eval(shift(@insns)); 1219 1220 &vpsrld (@X[2],@X[0],31); 1221 eval(shift(@insns)); 1222 eval(shift(@insns)); 1223 eval(shift(@insns)); 1224 eval(shift(@insns)); 1225 1226 &vpslldq(@X[4],@X[0],12); # "X[0]"<<96, extract one dword 1227 &vpaddd (@X[0],@X[0],@X[0]); 1228 eval(shift(@insns)); 1229 eval(shift(@insns)); 1230 eval(shift(@insns)); 1231 eval(shift(@insns)); 1232 1233 &vpsrld (@X[3],@X[4],30); 1234 &vpor (@X[0],@X[0],@X[2]); # "X[0]"<<<=1 1235 eval(shift(@insns)); 1236 eval(shift(@insns)); 1237 eval(shift(@insns)); 1238 eval(shift(@insns)); 1239 1240 &vpslld (@X[4],@X[4],2); 1241 &vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer 1242 eval(shift(@insns)); 1243 eval(shift(@insns)); 1244 &vpxor (@X[0],@X[0],@X[3]); 1245 eval(shift(@insns)); 1246 eval(shift(@insns)); 1247 eval(shift(@insns)); 1248 eval(shift(@insns)); 1249 1250 &vpxor (@X[0],@X[0],@X[4]); # "X[0]"^=("X[0]"<<96)<<<2 1251 eval(shift(@insns)); 1252 eval(shift(@insns)); 1253 &vmovdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX 1254 eval(shift(@insns)); 1255 eval(shift(@insns)); 1256 1257 foreach (@insns) { eval; } # remaining instructions [if any] 1258 1259 $Xi++; push(@X,shift(@X)); # "rotate" X[] 1260} 1261 1262sub Xupdate_avx_32_79() 1263{ use integer; 1264 my $body = shift; 1265 my @insns = (&$body,&$body,&$body,&$body); # 32 to 44 instructions 1266 my ($a,$b,$c,$d,$e); 1267 1268 &vpalignr(@X[2],@X[-1&7],@X[-2&7],8); # compose "X[-6]" 1269 &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]" 1270 eval(shift(@insns)); # body_20_39 1271 eval(shift(@insns)); 1272 eval(shift(@insns)); 1273 eval(shift(@insns)); # rol 1274 1275 &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]" 1276 &vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer 1277 eval(shift(@insns)); 1278 eval(shift(@insns)); 1279 if ($Xi%5) { 1280 &vmovdqa (@X[4],@X[3]); # "perpetuate" K_XX_XX... 1281 } else { # ... 
	} else {			# ... or load next one
	  &vmovdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpsrld	(@X[2],@X[0],30);
	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpslld	(@X[0],@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpor	(@X[0],@X[0],@X[2]);	# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	  &vmovdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_avx_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }		# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&vmovdqa(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&vmovdqa(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&vmovdqu(@X[-4&7],&QWP(0,$inp));	# load input
	&vmovdqu(@X[-3&7],&QWP(16,$inp));
	&vmovdqu(@X[-2&7],&QWP(32,$inp));
	&vmovdqu(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

  $Xi=0;
}

sub Xloop_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpshufb	(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa	(&QWP(0+16*$Xi,"esp"),@X[$Xi&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }
  $Xi++;
}

sub Xtail_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

&set_label("loop",16);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_32_79(\&body_00_19);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"

				$saved_j=$j; @saved_V=@V;

	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	($B,$C);
	&mov	(&DWP(8,@T[1]),$C);
	&xor	($B,$D);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
	&mov	(@T[1],@T[0]);
	&and	(@T[0],$B);
	&mov	($B,@T[1]);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);

	&vzeroall();

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
&function_end("_sha1_block_data_order_avx");
}
&set_label("K_XX_XX",64);
&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999);	# K_00_19
&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1);	# K_20_39
&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc);	# K_40_59
&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6);	# K_60_79
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# pbswap mask
&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);
}
&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

&asm_finish();