author		Josh Soref <jsoref@users.noreply.github.com>	2017-11-11 19:03:10 -0500
committer	Rich Salz <rsalz@openssl.org>			2017-11-11 19:03:10 -0500
commit		46f4e1bec51dc96fa275c168752aa34359d9ee51 (patch)
tree		c80b737d1fff479fd88f6c41175187ebad868299 /crypto/aes
parent		b4d0fa49d9d1a43792e58b0c8066bb23b9e53ef4 (diff)
Many spelling fixes/typos corrected.
Around 138 distinct errors found and fixed; thanks!
Reviewed-by: Kurt Roeckx <kurt@roeckx.be>
Reviewed-by: Tim Hudson <tjh@openssl.org>
Reviewed-by: Rich Salz <rsalz@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/3459)
Diffstat (limited to 'crypto/aes')
-rwxr-xr-x	crypto/aes/asm/aes-586.pl		16
-rw-r--r--	crypto/aes/asm/aes-mips.pl		 2
-rw-r--r--	crypto/aes/asm/aes-parisc.pl		 2
-rw-r--r--	crypto/aes/asm/aes-s390x.pl		 2
-rwxr-xr-x	crypto/aes/asm/aes-x86_64.pl		 2
-rw-r--r--	crypto/aes/asm/aesfx-sparcv9.pl		 2
-rw-r--r--	crypto/aes/asm/aesni-mb-x86_64.pl	 8
-rw-r--r--	crypto/aes/asm/aesni-x86.pl		 2
-rw-r--r--	crypto/aes/asm/aesni-x86_64.pl		16
-rw-r--r--	crypto/aes/asm/aest4-sparcv9.pl		 2
-rwxr-xr-x	crypto/aes/asm/aesv8-armx.pl		 4
-rwxr-xr-x	crypto/aes/asm/vpaes-armv8.pl		 4

12 files changed, 31 insertions, 31 deletions
diff --git a/crypto/aes/asm/aes-586.pl b/crypto/aes/asm/aes-586.pl
index 20c19e98bf..29059edf8b 100755
--- a/crypto/aes/asm/aes-586.pl
+++ b/crypto/aes/asm/aes-586.pl
@@ -55,8 +55,8 @@
 # better performance on most recent µ-archs...
 #
 # Third version adds AES_cbc_encrypt implementation, which resulted in
-# up to 40% performance imrovement of CBC benchmark results. 40% was
-# observed on P4 core, where "overall" imrovement coefficient, i.e. if
+# up to 40% performance improvement of CBC benchmark results. 40% was
+# observed on P4 core, where "overall" improvement coefficient, i.e. if
 # compared to PIC generated by GCC and in CBC mode, was observed to be
 # as large as 4x:-) CBC performance is virtually identical to ECB now
 # and on some platforms even better, e.g. 17.6 "small" cycles/byte on
@@ -159,7 +159,7 @@
 # combinations then attack becomes infeasible. This is why revised
 # AES_cbc_encrypt "dares" to switch to larger S-box when larger chunk
 # of data is to be processed in one stroke. The current size limit of
-# 512 bytes is chosen to provide same [diminishigly low] probability
+# 512 bytes is chosen to provide same [diminishingly low] probability
 # for cache-line to remain untouched in large chunk operation with
 # large S-box as for single block operation with compact S-box and
 # surely needs more careful consideration...
@@ -171,12 +171,12 @@
 # yield execution to process performing AES just before timer fires
 # off the scheduler, immediately regain control of CPU and analyze the
 # cache state. For this attack to be efficient attacker would have to
-# effectively slow down the operation by several *orders* of magnitute,
+# effectively slow down the operation by several *orders* of magnitude,
 # by ratio of time slice to duration of handful of AES rounds, which
 # unlikely to remain unnoticed. Not to mention that this also means
-# that he would spend correspondigly more time to collect enough
+# that he would spend correspondingly more time to collect enough
 # statistical data to mount the attack. It's probably appropriate to
-# say that if adeversary reckons that this attack is beneficial and
+# say that if adversary reckons that this attack is beneficial and
 # risks to be noticed, you probably have larger problems having him
 # mere opportunity. In other words suggested code design expects you
 # to preclude/mitigate this attack by overall system security design.
@@ -240,7 +240,7 @@ $small_footprint=1;	# $small_footprint=1 code is ~5% slower [on
 			# contention and in hope to "collect" 5% back
 			# in real-life applications...
 
-$vertical_spin=0;	# shift "verticaly" defaults to 0, because of
+$vertical_spin=0;	# shift "vertically" defaults to 0, because of
 			# its proof-of-concept status...
 # Note that there is no decvert(), as well as last encryption round is
 # performed with "horizontal" shifts. This is because this "vertical"
@@ -1606,7 +1606,7 @@ sub decstep()
 	# no instructions are reordered, as performance appears
 	# optimal... or rather that all attempts to reorder didn't
 	# result in better performance [which by the way is not a
-	# bit lower than ecryption].
+	# bit lower than encryption].
 	if($i==3)   {	&mov	($key,$__key);			}
 	else        {	&mov	($out,$s[0]);			}
 			&and	($out,0xFF);
diff --git a/crypto/aes/asm/aes-mips.pl b/crypto/aes/asm/aes-mips.pl
index ba3e4545df..d81d76d77a 100644
--- a/crypto/aes/asm/aes-mips.pl
+++ b/crypto/aes/asm/aes-mips.pl
@@ -120,7 +120,7 @@ my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
 my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23));
 my ($key0,$cnt)=($gp,$fp);
 
-# instuction ordering is "stolen" from output from MIPSpro assembler
+# instruction ordering is "stolen" from output from MIPSpro assembler
 # invoked with -mips3 -O3 arguments...
 $code.=<<___;
 .align	5
diff --git a/crypto/aes/asm/aes-parisc.pl b/crypto/aes/asm/aes-parisc.pl
index fb754eb460..b688ab3050 100644
--- a/crypto/aes/asm/aes-parisc.pl
+++ b/crypto/aes/asm/aes-parisc.pl
@@ -1015,7 +1015,7 @@ ___
 
 foreach (split("\n",$code)) {
 	s/\`([^\`]*)\`/eval $1/ge;
 
-	# translate made up instructons: _ror, _srm
+	# translate made up instructions: _ror, _srm
 	s/_ror(\s+)(%r[0-9]+),/shd$1$2,$2,/			or
 	s/_srm(\s+%r[0-9]+),([0-9]+),/
diff --git a/crypto/aes/asm/aes-s390x.pl b/crypto/aes/asm/aes-s390x.pl
index 1495917d26..0ef1f6b50a 100644
--- a/crypto/aes/asm/aes-s390x.pl
+++ b/crypto/aes/asm/aes-s390x.pl
@@ -44,7 +44,7 @@
 # minimize/avoid Address Generation Interlock hazard and to favour
 # dual-issue z10 pipeline. This gave ~25% improvement on z10 and
 # almost 50% on z9. The gain is smaller on z10, because being dual-
-# issue z10 makes it improssible to eliminate the interlock condition:
+# issue z10 makes it impossible to eliminate the interlock condition:
 # critial path is not long enough. Yet it spends ~24 cycles per byte
 # processed with 128-bit key.
 #
diff --git a/crypto/aes/asm/aes-x86_64.pl b/crypto/aes/asm/aes-x86_64.pl
index c24a5510bd..4d1dc9c701 100755
--- a/crypto/aes/asm/aes-x86_64.pl
+++ b/crypto/aes/asm/aes-x86_64.pl
@@ -2018,7 +2018,7 @@ AES_cbc_encrypt:
 	lea	($key,%rax),%rax
 	mov	%rax,$keyend
 
-	# pick Te4 copy which can't "overlap" with stack frame or key scdedule
+	# pick Te4 copy which can't "overlap" with stack frame or key schedule
 	lea	2048($sbox),$sbox
 	lea	768-8(%rsp),%rax
 	sub	$sbox,%rax
diff --git a/crypto/aes/asm/aesfx-sparcv9.pl b/crypto/aes/asm/aesfx-sparcv9.pl
index 04b3cf7116..9ddf0b4b00 100644
--- a/crypto/aes/asm/aesfx-sparcv9.pl
+++ b/crypto/aes/asm/aesfx-sparcv9.pl
@@ -22,7 +22,7 @@
 # April 2016
 #
 # Add "teaser" CBC and CTR mode-specific subroutines. "Teaser" means
-# that parallelizeable nature of CBC decrypt and CTR is not utilized
+# that parallelizable nature of CBC decrypt and CTR is not utilized
 # yet. CBC encrypt on the other hand is as good as it can possibly
 # get processing one byte in 4.1 cycles with 128-bit key on SPARC64 X.
 # This is ~6x faster than pure software implementation...
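
The cache-timing discussion in the aes-586.pl comments above hinges on how
many cache lines the key-dependent S-box lookups can touch. A minimal sketch
of that idea in C, assuming a generic byte table and 64-byte cache lines
(both illustrative assumptions, not OpenSSL's actual code): touching every
line of the table before the key-dependent accesses makes the set of
resident lines independent of the data being processed.

	#include <stdint.h>
	#include <stddef.h>

	#define CACHE_LINE 64	/* assumed line size; real code would detect it */

	/* Read one byte from every cache line of a lookup table before any
	 * key-dependent access, so a cache-timing observer finds all lines
	 * resident no matter which entries were actually used.  The volatile
	 * sink keeps the compiler from discarding the reads. */
	static void presoak_table(const uint8_t *table, size_t len)
	{
	    volatile uint8_t sink = 0;
	    for (size_t i = 0; i < len; i += CACHE_LINE)
	        sink ^= table[i];
	    (void)sink;
	}

A compact 256-byte S-box would span only four such lines, which is why the
comments above treat the small table as the safer default and reserve the
large one for bulk (>=512-byte) processing.
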
diff --git a/crypto/aes/asm/aesni-mb-x86_64.pl b/crypto/aes/asm/aesni-mb-x86_64.pl
index c4151feb7e..1f356d2d3f 100644
--- a/crypto/aes/asm/aesni-mb-x86_64.pl
+++ b/crypto/aes/asm/aesni-mb-x86_64.pl
@@ -1325,10 +1325,10 @@ se_handler:
 	mov	-48(%rax),%r15
 	mov	%rbx,144($context)	# restore context->Rbx
 	mov	%rbp,160($context)	# restore context->Rbp
-	mov	%r12,216($context)	# restore cotnext->R12
-	mov	%r13,224($context)	# restore cotnext->R13
-	mov	%r14,232($context)	# restore cotnext->R14
-	mov	%r15,240($context)	# restore cotnext->R15
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
 
 	lea	-56-10*16(%rax),%rsi
 	lea	512($context),%rdi	# &context.Xmm6
diff --git a/crypto/aes/asm/aesni-x86.pl b/crypto/aes/asm/aesni-x86.pl
index 81c1cd651b..b351fca28e 100644
--- a/crypto/aes/asm/aesni-x86.pl
+++ b/crypto/aes/asm/aesni-x86.pl
@@ -239,7 +239,7 @@ sub aesni_generate1	# fully unrolled loop
 # can schedule aes[enc|dec] every cycle optimal interleave factor
 # equals to corresponding instructions latency. 8x is optimal for
 # *Bridge, but it's unfeasible to accommodate such implementation
-# in XMM registers addreassable in 32-bit mode and therefore maximum
+# in XMM registers addressable in 32-bit mode and therefore maximum
 # of 6x is used instead...
 
 sub aesni_generate2
diff --git a/crypto/aes/asm/aesni-x86_64.pl b/crypto/aes/asm/aesni-x86_64.pl
index c5c3614eee..2a202c53e5 100644
--- a/crypto/aes/asm/aesni-x86_64.pl
+++ b/crypto/aes/asm/aesni-x86_64.pl
@@ -60,7 +60,7 @@
 # identical to CBC, because CBC-MAC is essentially CBC encrypt without
 # saving output. CCM CTR "stays invisible," because it's neatly
 # interleaved wih CBC-MAC. This provides ~30% improvement over
-# "straghtforward" CCM implementation with CTR and CBC-MAC performed
+# "straightforward" CCM implementation with CTR and CBC-MAC performed
 # disjointly. Parallelizable modes practically achieve the theoretical
 # limit.
 #
@@ -143,14 +143,14 @@
 # asymptotic, if it can be surpassed, isn't it? What happens there?
 # Rewind to CBC paragraph for the answer. Yes, out-of-order execution
 # magic is responsible for this. Processor overlaps not only the
-# additional instructions with AES ones, but even AES instuctions
+# additional instructions with AES ones, but even AES instructions
 # processing adjacent triplets of independent blocks. In the 6x case
 # additional instructions still claim disproportionally small amount
 # of additional cycles, but in 8x case number of instructions must be
 # a tad too high for out-of-order logic to cope with, and AES unit
 # remains underutilized... As you can see 8x interleave is hardly
 # justifiable, so there no need to feel bad that 32-bit aesni-x86.pl
-# utilizies 6x interleave because of limited register bank capacity.
+# utilizes 6x interleave because of limited register bank capacity.
 #
 # Higher interleave factors do have negative impact on Westmere
 # performance. While for ECB mode it's negligible ~1.5%, other
@@ -1550,7 +1550,7 @@ $code.=<<___;
 	sub	\$8,$len
 	jnc	.Lctr32_loop8		# loop if $len-=8 didn't borrow
 
-	add	\$8,$len		# restore real remainig $len
+	add	\$8,$len		# restore real remaining $len
 	jz	.Lctr32_done		# done if ($len==0)
 
 	lea	-0x80($key),$key
@@ -1667,7 +1667,7 @@ $code.=<<___;
 	movups	$inout2,0x20($out)	# $len was 3, stop store
 
 .Lctr32_done:
-	xorps	%xmm0,%xmm0		# clear regiser bank
+	xorps	%xmm0,%xmm0		# clear register bank
 	xor	$key0,$key0
 	pxor	%xmm1,%xmm1
 	pxor	%xmm2,%xmm2
@@ -1856,7 +1856,7 @@ $code.=<<___;
 	lea	`16*6`($inp),$inp
 	pxor	$twmask,$inout5
 
-	pxor	$twres,@tweak[0]	# calclulate tweaks^round[last]
+	pxor	$twres,@tweak[0]	# calculate tweaks^round[last]
 	aesenc	$rndkey1,$inout4
 	pxor	$twres,@tweak[1]
 	movdqa	@tweak[0],`16*0`(%rsp)	# put aside tweaks^round[last]
@@ -2342,7 +2342,7 @@ $code.=<<___;
 	lea	`16*6`($inp),$inp
 	pxor	$twmask,$inout5
 
-	pxor	$twres,@tweak[0]	# calclulate tweaks^round[last]
+	pxor	$twres,@tweak[0]	# calculate tweaks^round[last]
 	aesdec	$rndkey1,$inout4
 	pxor	$twres,@tweak[1]
 	movdqa	@tweak[0],`16*0`(%rsp)	# put aside tweaks^last round key
@@ -4515,7 +4515,7 @@ __aesni_set_encrypt_key:
 
 .align	16
 .L14rounds:
-	movups	16($inp),%xmm2			# remaning half of *userKey
+	movups	16($inp),%xmm2			# remaining half of *userKey
 	mov	\$13,$bits			# 14 rounds for 256
 	lea	16(%rax),%rax
 	cmp	\$`1<<28`,%r10d			# AVX, but no XOP
diff --git a/crypto/aes/asm/aest4-sparcv9.pl b/crypto/aes/asm/aest4-sparcv9.pl
index 8d2b33d73e..54d0c58821 100644
--- a/crypto/aes/asm/aest4-sparcv9.pl
+++ b/crypto/aes/asm/aest4-sparcv9.pl
@@ -44,7 +44,7 @@
 # instructions with those on critical path. Amazing!
 #
 # As with Intel AES-NI, question is if it's possible to improve
-# performance of parallelizeable modes by interleaving round
+# performance of parallelizable modes by interleaving round
 # instructions. Provided round instruction latency and throughput
 # optimal interleave factor is 2. But can we expect 2x performance
 # improvement? Well, as round instructions can be issued one per
diff --git a/crypto/aes/asm/aesv8-armx.pl b/crypto/aes/asm/aesv8-armx.pl
index a7947af486..385b31fd54 100755
--- a/crypto/aes/asm/aesv8-armx.pl
+++ b/crypto/aes/asm/aesv8-armx.pl
@@ -929,7 +929,7 @@ if ($flavour =~ /64/) {			######## 64-bit code
 	s/^(\s+)v/$1/o		or	# strip off v prefix
 	s/\bbx\s+lr\b/ret/o;
 
-	# fix up remainig legacy suffixes
+	# fix up remaining legacy suffixes
 	s/\.[ui]?8//o;
 	m/\],#8/o and s/\.16b/\.8b/go;
 	s/\.[ui]?32//o and s/\.16b/\.4s/go;
@@ -988,7 +988,7 @@ if ($flavour =~ /64/) {			######## 64-bit code
 	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
 	s/\/\/\s?/@ /o;				# new->old style commentary
 
-	# fix up remainig new-style suffixes
+	# fix up remaining new-style suffixes
 	s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo	or
 	s/\],#[0-9]+/]!/o;
 
diff --git a/crypto/aes/asm/vpaes-armv8.pl b/crypto/aes/asm/vpaes-armv8.pl
index 2e704a2124..5131e13a09 100755
--- a/crypto/aes/asm/vpaes-armv8.pl
+++ b/crypto/aes/asm/vpaes-armv8.pl
@@ -31,7 +31,7 @@
 # Apple A7(***)	22.7(**)	10.9/14.3	[8.45/10.0 ]
 # Mongoose(***)	26.3(**)	21.0/25.0(**)	[13.3/16.8 ]
 #
-# (*)	ECB denotes approximate result for parallelizeable modes
+# (*)	ECB denotes approximate result for parallelizable modes
 #	such as CBC decrypt, CTR, etc.;
 # (**)	these results are worse than scalar compiler-generated
 #	code, but it's constant-time and therefore preferred;
@@ -137,7 +137,7 @@ _vpaes_consts:
 	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
 	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
 
-.asciz	"Vector Permutaion AES for ARMv8, Mike Hamburg (Stanford University)"
+.asciz	"Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
 .size	_vpaes_consts,.-_vpaes_consts
 .align	6
___
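
The interleave-factor commentary above (aesni-x86.pl, aesni-x86_64.pl,
aest4-sparcv9.pl) reasons about hiding AES round-instruction latency by
processing several independent blocks of a parallelizable mode at once. A
minimal sketch of the technique with AES-NI intrinsics, assuming a
pre-expanded key schedule rk[0..nr] (names are illustrative; this is not
OpenSSL's code): the two dependency chains alternate, so the second aesenc
can issue while the first is still in flight.

	#include <wmmintrin.h>	/* AES-NI intrinsics; build with -maes */

	/* Encrypt two independent 16-byte blocks with a 2x-interleaved round
	 * loop.  rk[0..nr] is a pre-expanded AES key schedule, nr = 10/12/14. */
	static void aes_encrypt_x2(__m128i b0, __m128i b1,
	                           const __m128i *rk, int nr,
	                           __m128i *out0, __m128i *out1)
	{
	    b0 = _mm_xor_si128(b0, rk[0]);            /* initial whitening */
	    b1 = _mm_xor_si128(b1, rk[0]);
	    for (int r = 1; r < nr; r++) {
	        b0 = _mm_aesenc_si128(b0, rk[r]);     /* round on block 0...  */
	        b1 = _mm_aesenc_si128(b1, rk[r]);     /* ...overlaps block 1  */
	    }
	    *out0 = _mm_aesenclast_si128(b0, rk[nr]); /* last round, no MixColumns */
	    *out1 = _mm_aesenclast_si128(b1, rk[nr]);
	}

CBC encrypt cannot be split this way, since each block depends on the
previous ciphertext; that is exactly why the comments single out ECB, CTR
and CBC decrypt as the parallelizable cases.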