#!/usr/bin/env perl

# ====================================================================
# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# "[Re]written" was achieved in two major overhauls. In 2004 BODY_*
# functions were re-implemented to address P4 performance issue [see
# commentary below], and in 2006 the rest was rewritten in order to
# gain freedom to liberate licensing terms.

# January, September 2004.
#
# It was noted that Intel IA-32 C compiler generates code which
# performs ~30% *faster* on P4 CPU than original *hand-coded*
# SHA1 assembler implementation. To address this problem (and
# prove that humans are still better than machines:-), the
# original code was overhauled, which resulted in following
# performance changes:
#
#		compared with original	compared with Intel cc
#		assembler impl.		generated code
# Pentium	-16%			+48%
# PIII/AMD	+8%			+16%
# P4		+85%(!)			+45%
#
# As you can see Pentium came out as loser:-( Yet I reckoned that
# improvement on P4 outweighs the loss and incorporated this
# re-tuned code into 0.9.7 and later.
# ----------------------------------------------------------------
#					<appro@fy.chalmers.se>

# August 2009.
#
# George Spelvin has tipped that F_40_59(b,c,d) can be rewritten as
# '(c&d) + (b&(c^d))', which makes it possible to accumulate partial
# results and lighten "pressure" on scratch registers. This resulted
# in >12% performance improvement on contemporary AMD cores (with no
# degradation on other CPUs:-). Also, the code was revised to maximize
# "distance" between instructions producing input to 'lea' instruction
# and the 'lea' instruction itself, which is essential for Intel Atom
# core and resulted in ~15% improvement.
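#
# The rewrite works because c&d and b&(c^d) can never have a bit set
# in the same position, so their sum equals their OR, which in turn is
# the majority function F_40_59. A minimal per-bit self-check of the
# identity follows; it is illustrative only, the sub is ours and is
# never called by this generator.

sub _check_F_40_59_identity {			# illustrative, not used
    for my $v (0 .. 7) {			# exhaustive per-bit check
	my ($b, $c, $d) = (($v >> 2) & 1, ($v >> 1) & 1, $v & 1);
	my $maj = ($b & $c) | ($b & $d) | ($c & $d);	# textbook F_40_59
	my $alt = ($c & $d) + ($b & ($c ^ $d));		# Spelvin's form
	die "F_40_59 mismatch at b=$b c=$c d=$d" if ($maj != $alt);
    }
    return 1;
}
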
# October 2010.
#
# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
# is to offload message schedule denoted by Wt in NIST specification,
# or Xupdate in OpenSSL source, to SIMD unit. The idea is not novel,
# and in SSE2 context was first explored by Dean Gaudet in 2004, see
# http://arctic.org/~dean/crypto/sha1.html. Since then several things
# have changed that made it interesting again:
#
# a) XMM units became faster and wider;
# b) instruction set became more versatile;
# c) an important observation was made by Max Locktyukhin, which made
#    it possible to reduce amount of instructions required to perform
#    the operation in question, for further details see
#    http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.
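#
# For reference, the message schedule being offloaded is, in scalar
# form (illustrative only: these subs are ours and are never called by
# this generator):

sub _rol32 { my ($x,$n) = @_; (($x << $n) | ($x >> (32 - $n))) & 0xffffffff; }

sub _sha1_schedule_ref {
    my @W = @_;				# W[0..15], byte-swapped input block
    for my $i (16 .. 79) {
	$W[$i] = _rol32($W[$i-3] ^ $W[$i-8] ^ $W[$i-14] ^ $W[$i-16], 1);
    }
    return @W;				# W[0..79]
}
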
# April 2011.
#
# Add AVX code path, probably most controversial... The thing is that
# switch to AVX alone improves performance by as little as 4% in
# comparison to SSSE3 code path. But below result doesn't look like
# 4% improvement... Trouble is that Sandy Bridge decodes 'ro[rl]' as
# pair of µ-ops, and it's the additional µ-ops, two per round, that
# make it run slower than Core2 and Westmere. But 'sh[rl]d' is decoded
# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with
# equivalent 'sh[rl]d' that is responsible for the impressive 5.1
# cycles per processed byte. But 'sh[rl]d' is not something that used
# to be fast, nor does it appear to be fast in upcoming Bulldozer
# [according to its optimization manual]. Which is why AVX code path
# is guarded by *both* AVX and synthetic bit denoting Intel CPUs.
# One can argue that it's unfair to AMD, but without 'sh[rl]d' it
# makes no sense to keep the AVX code path. If somebody feels that
# strongly, it's probably more appropriate to discuss possibility of
# using vector rotate XOP on AMD...

# March 2014.
#
# Add support for Intel SHA Extensions.

######################################################################
# Current performance is summarized in following table. Numbers are
# CPU clock cycles spent to process single byte (less is better).
#
#		x86		SSSE3		AVX
# Pentium	15.7		-
# PIII		11.5		-
# P4		10.6		-
# AMD K8	7.1		-
# Core2		7.3		6.0/+22%	-
# Westmere	7.3		5.5/+33%	-
# Sandy Bridge	8.8		6.2/+40%	5.1(**)/+73%
# Ivy Bridge	7.2		4.8/+51%	4.7(**)/+53%
# Haswell	6.5		4.3/+51%	4.1(**)/+58%
# Skylake	6.4		4.1/+55%	4.1(**)/+55%
# Bulldozer	11.6		6.0/+92%
# VIA Nano	10.6		7.5/+41%
# Atom		12.5		9.3(*)/+35%
# Silvermont	14.5		9.9(*)/+46%
# Goldmont	8.8		6.7/+30%	1.7(***)/+415%
#
# (*)	Loop is 1056 instructions long and expected result is ~8.25.
#	The discrepancy is because of front-end limitations, so-called
#	MS-ROM penalties, and on Silvermont even rotate's limited
#	parallelism.
#
# (**)	As per above comment, the result is for AVX *plus* sh[rl]d.
#
# (***)	SHAEXT result

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output=pop;
open STDOUT,">$output";

&asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");

$xmm=$ymm=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
$ymm = 1;

$ymm = 0 unless ($xmm);

$shaext=$xmm;	### set to zero if compiling for 1.0.1

# TODO(davidben): Consider enabling the Intel SHA Extensions code once it's
# been tested.
$shaext = 0;

&external_label("OPENSSL_ia32cap_P") if ($xmm);


$A="eax";
$B="ebx";
$C="ecx";
$D="edx";
$E="edi";
$T="esi";
$tmp1="ebp";

@V=($A,$B,$C,$D,$E,$T);

$alt=0;	# 1 denotes alternative IALU implementation, which performs
	# 8% *worse* on P4, same on Westmere and Atom, 2% better on
	# Sandy Bridge...
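
# The BODY_* subs below emit an unrolled, register-renamed form of the
# following scalar reference, which also spells out the F_00_19 form
# (b&(c^d))^d and the F_40_59 form (c&d)+(b&(c^d)) that the comments
# keep referring to. Illustrative only, never called by this generator;
# _rol32 is defined above.

sub _sha1_block_ref {
    my ($h, @W) = @_;		# $h: [A,B,C,D,E]; @W: 80 scheduled words
    my ($a, $b, $c, $d, $e) = @$h;
    for my $i (0 .. 79) {
	my ($f, $k);
	if    ($i < 20) { $f = ($b & ($c ^ $d)) ^ $d;		$k = 0x5a827999; }
	elsif ($i < 40) { $f = $b ^ $c ^ $d;			$k = 0x6ed9eba1; }
	elsif ($i < 60) { $f = ($c & $d) + ($b & ($c ^ $d));	$k = 0x8f1bbcdc; }
	else		{ $f = $b ^ $c ^ $d;			$k = 0xca62c1d6; }
	my $t = (_rol32($a, 5) + $f + $e + $k + $W[$i]) & 0xffffffff;
	($e, $d, $c, $b, $a) = ($d, $c, _rol32($b, 30), $a, $t);
    }
    my @sum = ($a, $b, $c, $d, $e);
    $h->[$_] = ($h->[$_] + $sum[$_]) & 0xffffffff for (0 .. 4);
}
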
sub BODY_00_15
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("00_15 $n");

	&mov($f,$c);			# f to hold F_00_19(b,c,d)
	if ($n==0)  { &mov($tmp1,$a); }
	else        { &mov($a,$tmp1); }
	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
	&xor($f,$d);
	&add($tmp1,$e);			# tmp1+=e;
	&mov($e,&swtmp($n%16));		# e becomes volatile and is loaded
					# with xi, also note that e becomes
					# f in next round...
	&and($f,$b);
	&rotr($b,2);			# b=ROTATE(b,30)
	&xor($f,$d);			# f holds F_00_19(b,c,d)
	&lea($tmp1,&DWP(0x5a827999,$tmp1,$e));	# tmp1+=K_00_19+xi

	if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
		      &add($f,$tmp1); }	# f+=tmp1
	else        { &add($tmp1,$f); }	# f becomes a in next round
	&mov($tmp1,$a)			if ($alt && $n==15);
	}

sub BODY_16_19
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("16_19 $n");

if ($alt) {
	&xor($c,$d);
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&and($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d), b&=c^d
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&xor($c,$d);			# restore $c
	&mov($tmp1,$a);			# b in next round
	&rotr($b,$n==16?2:7);		# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&mov($tmp1,$a);
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($tmp1,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_20_39
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;
	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;

	&comment("20_39 $n");

if ($alt) {
	&xor($tmp1,$c);			# tmp1 to hold F_20_39(b,c,d), b^=c
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+8)%16));
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&mov($tmp1,$a);			# b in next round
	&rotr($b,7);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c)			if($n==39);# warm up for BODY_40_59
	&and($tmp1,$b)			if($n==39);
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
	&rotr($a,5)			if ($n==79);
} else {
	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$c);
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($tmp1,$a);
	&rotl($tmp1,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_40_59
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("40_59 $n");

if ($alt) {
	&add($e,$tmp1);			# e+=b&(c^d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&mov($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&xor($c,$d);			# restore $c
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&and($tmp1,$c);
	&rotr($b,7);			# b=ROTATE(b,30)
	&add($e,$tmp1);			# e+=c&d
	&mov($tmp1,$a);			# b in next round
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c)			if ($n<59);
	&and($tmp1,$b)			if ($n<59);# tmp1 to hold F_40_59(b,c,d)
	&lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($tmp1,$e);			# b&(c^d)+=e
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($e,$a);			# e becomes volatile
	&rotl($e,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f);		# xi=f
	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
	&mov($tmp1,$c);
	&add($f,$e);			# f+=ROTATE(a,5)
	&and($tmp1,$d);
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=c&d
}
	}

&function_begin("sha1_block_data_order");
if ($xmm) {
  &static_label("shaext_shortcut")	if ($shaext);
  &static_label("ssse3_shortcut");
  &static_label("avx_shortcut")		if ($ymm);
  &static_label("K_XX_XX");

	&call	(&label("pic_point"));	# make it PIC!
  &set_label("pic_point");
	&blindpop($tmp1);
	&picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));

	&mov	($A,&DWP(0,$T));
	&mov	($D,&DWP(4,$T));
	&test	($D,1<<9);		# check SSSE3 bit
	&jz	(&label("x86"));
	&mov	($C,&DWP(8,$T));
	&test	($A,1<<24);		# check FXSR bit
	&jz	(&label("x86"));
	if ($shaext) {
		&test	($C,1<<29);	# check SHA bit
		&jnz	(&label("shaext_shortcut"));
	}
	if ($ymm) {
		&and	($D,1<<28);	# mask AVX bit
		&and	($A,1<<30);	# mask "Intel CPU" bit
		&or	($A,$D);
		&cmp	($A,1<<28|1<<30);
		&je	(&label("avx_shortcut"));
	}
	&jmp	(&label("ssse3_shortcut"));
  &set_label("x86",16);
}
	&mov($tmp1,&wparam(0));	# SHA_CTX *c
	&mov($T,&wparam(1));	# const void *input
	&mov($A,&wparam(2));	# size_t num
	&stack_push(16+3);	# allocate X[16]
	&shl($A,6);
	&add($A,$T);
	&mov(&wparam(2),$A);	# pointer beyond the end of input
	&mov($E,&DWP(16,$tmp1));# pre-load E
	&jmp(&label("loop"));

&set_label("loop",16);

	# copy input chunk to X, but reversing byte order!
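	# [SHA-1 treats the message as big-endian 32-bit words, while
	# x86 loads are little-endian, hence one bswap per word, i.e.
	# unpack("N*",...) rather than unpack("V*",...) in Perl terms.]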
	for ($i=0; $i<16; $i+=4)
		{
		&mov($A,&DWP(4*($i+0),$T));
		&mov($B,&DWP(4*($i+1),$T));
		&mov($C,&DWP(4*($i+2),$T));
		&mov($D,&DWP(4*($i+3),$T));
		&bswap($A);
		&bswap($B);
		&bswap($C);
		&bswap($D);
		&mov(&swtmp($i+0),$A);
		&mov(&swtmp($i+1),$B);
		&mov(&swtmp($i+2),$C);
		&mov(&swtmp($i+3),$D);
		}
	&mov(&wparam(1),$T);	# redundant in 1st spin

	&mov($A,&DWP(0,$tmp1));	# load SHA_CTX
	&mov($B,&DWP(4,$tmp1));
	&mov($C,&DWP(8,$tmp1));
	&mov($D,&DWP(12,$tmp1));
	# E is pre-loaded

	for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
	for(;$i<20;$i++)	{ &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
	for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
	for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
	for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }

	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check

	&mov($tmp1,&wparam(0));	# re-load SHA_CTX*
	&mov($D,&wparam(1));	# D is last "T" and is discarded

	&add($E,&DWP(0,$tmp1));	# E is last "A"...
	&add($T,&DWP(4,$tmp1));
	&add($A,&DWP(8,$tmp1));
	&add($B,&DWP(12,$tmp1));
	&add($C,&DWP(16,$tmp1));

	&mov(&DWP(0,$tmp1),$E);	# update SHA_CTX
	&add($D,64);		# advance input pointer
	&mov(&DWP(4,$tmp1),$T);
	&cmp($D,&wparam(2));	# have we reached the end yet?
	&mov(&DWP(8,$tmp1),$A);
	&mov($E,$C);		# C is last "E" which needs to be "pre-loaded"
	&mov(&DWP(12,$tmp1),$B);
	&mov($T,$D);		# input pointer
	&mov(&DWP(16,$tmp1),$C);
	&jb(&label("loop"));

	&stack_pop(16+3);
&function_end("sha1_block_data_order");

if ($xmm) {
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA1 update function.
#
my ($ctx,$inp,$num)=("edi","esi","ecx");
my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
my @MSG=map("xmm$_",(4..7));

sub sha1rnds4 {
 my ($dst,$src,$imm)=@_;
	if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
	{	&data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm);	}
}
sub sha1op38 {
 my ($opcodelet,$dst,$src)=@_;
	if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
	{	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha1nexte	{ sha1op38(0xc8,@_); }
sub sha1msg1	{ sha1op38(0xc9,@_); }
sub sha1msg2	{ sha1op38(0xca,@_); }
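
# The helpers above hand-assemble the SHA instructions as raw bytes,
# presumably so that the output does not depend on assembler support
# for the mnemonics. sha1rnds4 is "0f 3a cc /r ib", with ModR/M byte
# 0xc0|(dst<<3)|src for the register-register form, so
# 'sha1rnds4 xmm0,xmm1,3' comes out as "0f 3a cc c1 03". Illustrative
# only (this sub is ours and is never called):

sub _sha1rnds4_bytes {
    my ($dst, $src, $imm) = @_;		# xmm register numbers 0..7
    return (0x0f, 0x3a, 0xcc, 0xc0 | ($dst << 3) | $src, $imm);
}
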
&function_begin("_sha1_block_data_order_shaext");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("shaext_shortcut");
	&mov	($ctx,&wparam(0));
	&mov	("ebx","esp");
	&mov	($inp,&wparam(1));
	&mov	($num,&wparam(2));
	&sub	("esp",32);

	&movdqu	($ABCD,&QWP(0,$ctx));
	&movd	($E,&DWP(16,$ctx));
	&and	("esp",-32);
	&movdqa	($BSWAP,&QWP(0x50,$tmp1));	# byte-n-word swap

	&movdqu	(@MSG[0],&QWP(0,$inp));
	&pshufd	($ABCD,$ABCD,0b00011011);	# flip word order
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufd	($E,$E,0b00011011);		# flip word order
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[0],$BSWAP);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[1],$BSWAP);
	&pshufb	(@MSG[2],$BSWAP);
	&pshufb	(@MSG[3],$BSWAP);
	&jmp	(&label("loop_shaext"));

&set_label("loop_shaext",16);
	&dec	($num);
	&lea	("eax",&DWP(0x40,$inp));
	&movdqa	(&QWP(0,"esp"),$E);	# offload $E
	&paddd	($E,@MSG[0]);
	&cmovne	($inp,"eax");
	&movdqa	(&QWP(16,"esp"),$ABCD);	# offload $ABCD

for($i=0;$i<20-4;$i+=2) {
	&sha1msg1	(@MSG[0],@MSG[1]);
	&movdqa		($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,int($i/5));	# 0-3...
	&sha1nexte	($E_,@MSG[1]);
	&pxor		(@MSG[0],@MSG[2]);
	&sha1msg1	(@MSG[1],@MSG[2]);
	&sha1msg2	(@MSG[0],@MSG[3]);

	&movdqa		($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,int(($i+1)/5));
	&sha1nexte	($E,@MSG[2]);
	&pxor		(@MSG[1],@MSG[3]);
	&sha1msg2	(@MSG[1],@MSG[0]);

	push(@MSG,shift(@MSG));	push(@MSG,shift(@MSG));
}
	&movdqu	(@MSG[0],&QWP(0,$inp));
	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);	# 64-67
	&sha1nexte	($E_,@MSG[1]);
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufb	(@MSG[0],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);	# 68-71
	&sha1nexte	($E,@MSG[2]);
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[1],$BSWAP);

	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);	# 72-75
	&sha1nexte	($E_,@MSG[3]);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[2],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);	# 76-79
	&movdqa	($E_,&QWP(0,"esp"));
	&pshufb	(@MSG[3],$BSWAP);
	&sha1nexte	($E,$E_);
	&paddd	($ABCD,&QWP(16,"esp"));

	&jnz	(&label("loop_shaext"));

	&pshufd	($ABCD,$ABCD,0b00011011);
	&pshufd	($E,$E,0b00011011);
	&movdqu	(&QWP(0,$ctx),$ABCD);
	&movd	(&DWP(16,$ctx),$E);
	&mov	("esp","ebx");
&function_end("_sha1_block_data_order_shaext");
}
######################################################################
# The SSSE3 implementation.
#
# %xmm[0-7] are used as ring @X[] buffer containing quadruples of last
# 32 elements of the message schedule or Xupdate outputs. First 4
# quadruples are simply byte-swapped input, next 4 are calculated
# according to method originally suggested by Dean Gaudet (modulo
# being implemented in SSSE3). Once 8 quadruples or 32 elements are
# collected, it switches to routine proposed by Max Locktyukhin.
#
# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to a 3-quadruple ring
# buffer on the stack. Keep in mind that X[2] is an alias for X[-6],
# X[3] for X[-5], and X[4] for X[-4]...
#
# Another notable optimization is aggressive stack frame compression
# aiming to minimize amount of 9-byte instructions...
#
# Yet another notable optimization is "jumping" $B variable. It means
# that there is no register permanently allocated for $B value. This
# made it possible to eliminate one instruction from body_20_39...
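#
# A note on the indexing used throughout: @X is "rotated" one position
# per Xupdate with push(@X,shift(@X)), and logical indices are reduced
# modulo 8 with a bitwise AND, so @X[-4&7] is @X[4] (in two's
# complement -4&7==4). Illustrative only (this sub is ours and is
# never called):

sub _ring_demo {
    my @X = map("xmm$_",(4..7,0..3));	# same seeding as below
    my $was = $X[-4 & 7];		# "X[-16]" slot, xmm0 at this point
    push(@X, shift(@X));		# one Xupdate later...
    return ($was, $X[-4 & 7]);		# ...the slot has moved to xmm1
}
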
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };

&function_begin("_sha1_block_data_order_ssse3");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("ssse3_shortcut");

	&movdqa	(@X[3],&QWP(0,$tmp1));	# K_00_19
	&movdqa	(@X[4],&QWP(16,$tmp1));	# K_20_39
	&movdqa	(@X[5],&QWP(32,$tmp1));	# K_40_59
	&movdqa	(@X[6],&QWP(48,$tmp1));	# K_60_79
	&movdqa	(@X[2],&QWP(64,$tmp1));	# pbswap mask

	&mov	($E,&wparam(0));	# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&movdqa	(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&movdqa	(&QWP(112+16,"esp"),@X[5]);
	&movdqa	(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&movdqa	(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&movdqa	(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));	# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);		# magic seed

	&movdqu	(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&movdqu	(@X[-3&7],&QWP(-48,$inp));
	&movdqu	(@X[-2&7],&QWP(-32,$inp));
	&movdqu	(@X[-1&7],&QWP(-16,$inp));
	&pshufb	(@X[-4&7],@X[2]);	# byte swap
	&pshufb	(@X[-3&7],@X[2]);
	&pshufb	(@X[-2&7],@X[2]);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&pshufb	(@X[-1&7],@X[2]);
	&paddd	(@X[-4&7],@X[3]);	# add K_00_19
	&paddd	(@X[-3&7],@X[3]);
	&paddd	(@X[-2&7],@X[3]);
	&movdqa	(&QWP(0,"esp"),@X[-4&7]);	# X[]+K xfer to IALU
	&psubd	(@X[-4&7],@X[3]);	# restore X[]
	&movdqa	(&QWP(0+16,"esp"),@X[-3&7]);
	&psubd	(@X[-3&7],@X[3]);
	&movdqa	(&QWP(0+32,"esp"),@X[-2&7]);
	&mov	(@T[1],$C);
	&psubd	(@X[-2&7],@X[3]);
	&xor	(@T[1],$D);
	&pshufd	(@X[0],@X[-4&7],0xee);	# was &movdqa	(@X[0],@X[-3&7]);
	&and	(@T[0],@T[1]);
	&jmp	(&label("loop"));

######################################################################
# SSE instruction sequence is first broken to groups of independent
# instructions, independent with respect to their inputs and shifter
# (not all architectures have more than one). Then IALU instructions
# are "knitted in" between the SSE groups. Distance is maintained for
# SSE latency of 2 in hope that it fits better upcoming AMD Bulldozer
# [which allegedly also implements SSSE3]...
#
# Temporary registers usage. X[2] is volatile at the entry and at the
# end is restored from backtrace ring buffer. X[3] is expected to
# contain current K_XX_XX constant and is used to calculate X[-1]+K
# from previous round, it becomes volatile the moment the value is
# saved to stack for transfer to IALU. X[4] becomes volatile whenever
# X[-4] is accumulated and offloaded to backtrace ring buffer, at the
# end it is loaded with next K_XX_XX [which becomes X[3] in next
# round]...
#
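# What a single 4xSIMD Xupdate amounts to in scalar terms: four new
# schedule words at once. Lane 3 needs W[i], which is only produced in
# lane 0 of the very same step, so it is first computed without that
# term and patched afterwards with W[i]<<<1, i.e. the pre-rotate lane 0
# value <<<2 - that is the '"X[0]"^=("X[0]"<<96)<<<2' business below.
# Illustrative only (this sub is ours and is never called; _rol32 as
# defined earlier):
#
sub _xupdate4_ref {
    my ($W, $i) = @_;			# $W: ref to schedule array, $i>=16
    my @t;
    for my $l (0 .. 3) {
	my $x = $W->[$i+$l-16] ^ $W->[$i+$l-14] ^ $W->[$i+$l-8];
	$x ^= $W->[$i+$l-3] if ($l < 3);	# lane 3 misses its W[i] term
	$t[$l] = _rol32($x, 1);
    }
    $t[3] ^= _rol32($t[0], 1);		# patch: the missing term was W[i]=$t[0]
    @{$W}[$i .. $i+3] = @t;
}
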
sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns));
	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
	&movdqa	(@X[2],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));

	&paddd	(@X[3],@X[-1&7]);
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	&psrldq	(@X[2],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror

	&pxor	(@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	&movdqa	(@X[4],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&movdqa	(@X[2],@X[0]);
	 eval(shift(@insns));

	&pslldq	(@X[4],12);		# "X[0]"<<96, extract one dword
	&paddd	(@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@X[2],31);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&movdqa	(@X[3],@X[4]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@X[4],30);
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&por	(@X[0],@X[2]);		# "X[0]"<<<=1
	 eval(shift(@insns));
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pslld	(@X[3],2);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&pxor	(@X[0],@X[4]);
	&movdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)<<<2
	&pshufd	(@X[1],@X[-3&7],0xee)	if ($Xi<7);	# was &movdqa	(@X[1],@X[-2&7])
	&pshufd	(@X[3],@X[-1&7],0xee)	if ($Xi==7);
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xupdate_ssse3_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# body_20_39
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	&punpcklqdq(@X[2],@X[-1&7]);	# compose "X[-6]", was &palignr(@X[2],@X[-2&7],8)
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns))	if (@insns[0] =~ /_rol/);
	if ($Xi%5) {
	  &movdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &movdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	 eval(shift(@insns));		# ror
	&paddd	(@X[3],@X[-1&7]);
	 eval(shift(@insns));

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&movdqa	(@X[2],@X[0]);
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns))	if (@insns[0] =~ /_rol/);

	&pslld	(@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&psrld	(@X[2],30);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns))	if (@insns[1] =~ /_rol/);
	 eval(shift(@insns))	if (@insns[0] =~ /_rol/);

	&por	(@X[0],@X[2]);		# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&pshufd	(@X[3],@X[-1],0xee)	if ($Xi<19);	# was &movdqa	(@X[3],@X[0])
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_ssse3_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddd	(@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&movdqa	(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&movdqa	(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&movdqu	(@X[-4&7],&QWP(0,$inp));	# load input
	&movdqu	(@X[-3&7],&QWP(16,$inp));
	&movdqu	(@X[-2&7],&QWP(32,$inp));
	&movdqu	(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

  $Xi=0;
}

sub Xloop_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&pshufb	(@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddd	(@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&movdqa	(&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&psubd	(@X[($Xi-4)&7],@X[3]);

	foreach (@insns) { eval; }
  $Xi++;
}

sub Xtail_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}
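
######################################################################
# Unlike the BODY_* subs of the integer-only path, the body_* subs
# below do not emit instructions when called: they return a list of
# single-quoted strings, one IALU instruction (or '.'-joined group)
# per slot, which the Xupdate_* subs above eval() one at a time,
# knitting the integer rounds in between the SIMD instructions. The
# mechanism boils down to the following (illustrative only, this sub
# is ours and is never called):
#
sub _interleave_demo {
    my @insns = ('&mov	($tmp1,$A);', '&rotl	($tmp1,5);');	# IALU stream
    eval(shift(@insns));	# emit one IALU instruction...
    #	... emit a SIMD instruction here ...
    eval(shift(@insns));	# ...then the next IALU one, and so on
}
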
sub body_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(c^d)&b
	return &body_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&$_ror	($b,$j?7:2);',	# $b>>>2
	'&xor	(@T[0],$d);',
	'&mov	(@T[1],$a);',	# $b in next round

	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	($b,$c);',	# $c^$d for next round

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&and	(@T[1],$b);',	# ($b&($c^$d)) for next round

	'&xor	($b,$c);',	# restore $b
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_20_39 () {	# b^d^c
	# on entry @T[0]=b^d
	return &body_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	(@T[0],$d)	if($j==19);'.
	'&xor	(@T[0],$c)	if($j> 19);',	# ($b^$d^$c)
	'&mov	(@T[1],$a);',	# $b in next round

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c)	if ($j< 79);',	# $b^$d for next round

	'&$_ror	($b,7);',	# $b>>>2
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_40_59 () {	# ((b^c)&(c^d))^c
	# on entry @T[0]=(b^c), (c^=d)
	$rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&and	(@T[0],$c)	if ($j>=40);',	# (b^c)&(c^d)
	'&xor	($c,$d)		if ($j>=40);',	# restore $c

	'&$_ror	($b,7);',	# $b>>>2
	'&mov	(@T[1],$a);',	# $b for next round
	'&xor	(@T[0],$c);',

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c)	if ($j==59);'.
	'&xor	(@T[1],$b)	if ($j< 59);',	# b^c for next round

	'&xor	($b,$c)		if ($j< 59);',	# c^d for next round
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
######
sub bodyx_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(b&c)^(~b&d), $e+=X[]+K
	return &bodyx_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	($b,$b,2)			if ($j==0);'.	# $b>>>2
	'&rorx	($b,@T[1],7)			if ($j!=0);',	# $b>>>2
	'&lea	($e,&DWP(0,$e,@T[0]));',
	'&rorx	(@T[0],$a,5);',

	'&andn	(@T[1],$a,$c);',
	'&and	($a,$b)',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"));',	# X[]+K xfer

	'&xor	(@T[1],$a)',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
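
# bodyx_00_19 evaluates F_00_19 in yet another equivalent form,
# (b&c)^(~b&d), because BMI's 'andn' computes ~x&y in one instruction.
# A per-bit self-check that it agrees with the ((c^d)&b)^d form used
# elsewhere (illustrative only, this sub is ours and is never called):
#
sub _check_F_00_19_forms {
    for my $v (0 .. 7) {
	my ($b, $c, $d) = (($v >> 2) & 1, ($v >> 1) & 1, $v & 1);
	my $ref  = ($b & $c) | ((1 - $b) & $d);		# textbook F_00_19
	my $chk1 = (($c ^ $d) & $b) ^ $d;		# form used by body_00_19
	my $chk2 = ($b & $c) ^ ((1 - $b) & $d);		# andn-friendly form
	die "F_00_19 mismatch" if ($chk1 != $ref || $chk2 != $ref);
    }
    return 1;
}
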
sub bodyx_20_39 () {	# b^d^c
	# on start $b=b^c^d
	return &bodyx_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&add	($e,($j==19?@T[0]:$b))',
	'&rorx	($b,@T[1],7);',	# $b>>>2
	'&rorx	(@T[0],$a,5);',

	'&xor	($a,$b)				if ($j<79);',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))	if ($j<79);',	# X[]+K xfer
	'&xor	($a,$c)				if ($j<79);',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_40_59 () {	# ((b^c)&(c^d))^c
	# on start $b=((b^c)&(c^d))^c
	return &bodyx_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	(@T[0],$a,5)',
	'&lea	($e,&DWP(0,$e,$b))',
	'&rorx	($b,@T[1],7)',	# $b>>>2
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))',	# X[]+K xfer

	'&mov	(@T[1],$c)',
	'&xor	($a,$b)',	# b^c for next round
	'&xor	(@T[1],$b)',	# c^d for next round

	'&and	($a,@T[1])',
	'&add	($e,@T[0])',
	'&xor	($a,$b)'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

&set_label("loop",16);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	($B,$C);
	&mov	(&DWP(12,@T[1]),$D);
	&xor	($B,$D);
	&mov	(&DWP(16,@T[1]),$E);
	&mov	(@T[1],@T[0]);
	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa	(@X[0],@X[-3&7]);
	&and	(@T[0],$B);
	&mov	($B,$T[1]);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

&function_end("_sha1_block_data_order_ssse3");

$rx=0;	# reset

if ($ymm) {
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };
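
# 'shld %r,%r,n' with both operands the same register shifts the
# register's own top bits back in, i.e. it is exactly 'rol %r,n' (and
# shrd likewise a ror), but is decoded as a single µ-op by Sandy
# Bridge - see the April 2011 note at the top. Hence the overrides of
# $_rol/$_ror above. Reference semantics for 1<=n<=31 (illustrative
# only, this sub is ours and is never called):
#
sub _shld_ref {				# shld(dst,src,n): dst<<n | src>>(32-n)
    my ($dst, $src, $n) = @_;
    ((($dst << $n) | (($src & 0xffffffff) >> (32 - $n))) & 0xffffffff);
}					# _shld_ref($x,$x,$n) == _rol32($x,$n)
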
&function_begin("_sha1_block_data_order_avx");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("avx_shortcut");
	&vzeroall();

	&vmovdqa(@X[3],&QWP(0,$tmp1));	# K_00_19
	&vmovdqa(@X[4],&QWP(16,$tmp1));	# K_20_39
	&vmovdqa(@X[5],&QWP(32,$tmp1));	# K_40_59
	&vmovdqa(@X[6],&QWP(48,$tmp1));	# K_60_79
	&vmovdqa(@X[2],&QWP(64,$tmp1));	# pbswap mask

	&mov	($E,&wparam(0));	# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&vmovdqa(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&vmovdqa(&QWP(112+16,"esp"),@X[5]);
	&vmovdqa(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&vmovdqa(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&vmovdqa(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));	# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);		# magic seed

	&vmovdqu(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&vmovdqu(@X[-3&7],&QWP(-48,$inp));
	&vmovdqu(@X[-2&7],&QWP(-32,$inp));
	&vmovdqu(@X[-1&7],&QWP(-16,$inp));
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&vpshufb(@X[-3&7],@X[-3&7],@X[2]);
	&vpshufb(@X[-2&7],@X[-2&7],@X[2]);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&vpshufb(@X[-1&7],@X[-1&7],@X[2]);
	&vpaddd	(@X[0],@X[-4&7],@X[3]);		# add K_00_19
	&vpaddd	(@X[1],@X[-3&7],@X[3]);
	&vpaddd	(@X[2],@X[-2&7],@X[3]);
	&vmovdqa(&QWP(0,"esp"),@X[0]);		# X[]+K xfer to IALU
	&mov	(@T[1],$C);
	&vmovdqa(&QWP(0+16,"esp"),@X[1]);
	&xor	(@T[1],$D);
	&vmovdqa(&QWP(0+32,"esp"),@X[2]);
	&and	(@T[0],@T[1]);
	&jmp	(&label("loop"));

sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	&vmovdqa(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpsrldq(@X[2],@X[-1&7],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));

"X[-3]"^"X[-8]" 1215 eval(shift(@insns)); 1216 eval(shift(@insns)); 1217 &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU 1218 eval(shift(@insns)); 1219 eval(shift(@insns)); 1220 1221 &vpxor (@X[0],@X[0],@X[2]); # "X[0]"^="X[-3]"^"X[-8]" 1222 eval(shift(@insns)); 1223 eval(shift(@insns)); 1224 eval(shift(@insns)); 1225 eval(shift(@insns)); 1226 1227 &vpsrld (@X[2],@X[0],31); 1228 eval(shift(@insns)); 1229 eval(shift(@insns)); 1230 eval(shift(@insns)); 1231 eval(shift(@insns)); 1232 1233 &vpslldq(@X[4],@X[0],12); # "X[0]"<<96, extract one dword 1234 &vpaddd (@X[0],@X[0],@X[0]); 1235 eval(shift(@insns)); 1236 eval(shift(@insns)); 1237 eval(shift(@insns)); 1238 eval(shift(@insns)); 1239 1240 &vpsrld (@X[3],@X[4],30); 1241 &vpor (@X[0],@X[0],@X[2]); # "X[0]"<<<=1 1242 eval(shift(@insns)); 1243 eval(shift(@insns)); 1244 eval(shift(@insns)); 1245 eval(shift(@insns)); 1246 1247 &vpslld (@X[4],@X[4],2); 1248 &vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer 1249 eval(shift(@insns)); 1250 eval(shift(@insns)); 1251 &vpxor (@X[0],@X[0],@X[3]); 1252 eval(shift(@insns)); 1253 eval(shift(@insns)); 1254 eval(shift(@insns)); 1255 eval(shift(@insns)); 1256 1257 &vpxor (@X[0],@X[0],@X[4]); # "X[0]"^=("X[0]"<<96)<<<2 1258 eval(shift(@insns)); 1259 eval(shift(@insns)); 1260 &vmovdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX 1261 eval(shift(@insns)); 1262 eval(shift(@insns)); 1263 1264 foreach (@insns) { eval; } # remaining instructions [if any] 1265 1266 $Xi++; push(@X,shift(@X)); # "rotate" X[] 1267} 1268 1269sub Xupdate_avx_32_79() 1270{ use integer; 1271 my $body = shift; 1272 my @insns = (&$body,&$body,&$body,&$body); # 32 to 44 instructions 1273 my ($a,$b,$c,$d,$e); 1274 1275 &vpalignr(@X[2],@X[-1&7],@X[-2&7],8); # compose "X[-6]" 1276 &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]" 1277 eval(shift(@insns)); # body_20_39 1278 eval(shift(@insns)); 1279 eval(shift(@insns)); 1280 eval(shift(@insns)); # rol 1281 1282 &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]" 1283 &vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer 1284 eval(shift(@insns)); 1285 eval(shift(@insns)); 1286 if ($Xi%5) { 1287 &vmovdqa (@X[4],@X[3]); # "perpetuate" K_XX_XX... 1288 } else { # ... 
	} else {			# ... or load next one
	  &vmovdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpsrld	(@X[2],@X[0],30);
	&vmovdqa(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpslld	(@X[0],@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpor	(@X[0],@X[0],@X[2]);	# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&vmovdqa(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_avx_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vmovdqa(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&vmovdqa(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&vmovdqa(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&vmovdqu(@X[-4&7],&QWP(0,$inp));	# load input
	&vmovdqu(@X[-3&7],&QWP(16,$inp));
	&vmovdqu(@X[-2&7],&QWP(32,$inp));
	&vmovdqu(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

  $Xi=0;
}

sub Xloop_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpshufb	(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa	(&QWP(0+16*$Xi,"esp"),@X[$Xi&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }
  $Xi++;
}

sub Xtail_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

&set_label("loop",16);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_32_79(\&body_00_19);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	($B,$C);
	&mov	(&DWP(8,@T[1]),$C);
	&xor	($B,$D);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
	&mov	(@T[1],@T[0]);
	&and	(@T[0],$B);
	&mov	($B,@T[1]);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);

	&vzeroall();

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
&function_end("_sha1_block_data_order_avx");
}
&set_label("K_XX_XX",64);
&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999);	# K_00_19
&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1);	# K_20_39
&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc);	# K_40_59
&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6);	# K_60_79
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# pbswap mask
&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);
}
&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

&asm_finish();

close STDOUT;