#! /usr/bin/env perl
# Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


######################################################################
## Constant-time SSSE3 AES core implementation.
## version 0.1
##
## By Mike Hamburg (Stanford University), 2009
## Public domain.
##
## For details see http://shiftleft.org/papers/vector_aes/ and
## http://crypto.stanford.edu/vpaes/.
##
######################################################################
# ARMv8 NEON adaptation by <[email protected]>
#
# This effort was undertaken because at least one popular SoC based on
# Cortex-A53 doesn't have crypto extensions.
#
#                    CBC enc     ECB enc/dec(*)   [bit-sliced enc/dec]
# Cortex-A53         21.5        18.1/20.6        [17.5/19.8         ]
# Cortex-A57         36.0(**)    20.4/24.9(**)    [14.4/16.6         ]
# X-Gene             45.9(**)    45.8/57.7(**)    [33.1/37.6(**)     ]
# Denver(***)        16.6(**)    15.1/17.8(**)    [8.80/9.93         ]
# Apple A7(***)      22.7(**)    10.9/14.3        [8.45/10.0         ]
# Mongoose(***)      26.3(**)    21.0/25.0(**)    [13.3/16.8         ]
#
# (*)   ECB denotes approximate result for parallelizable modes
#       such as CBC decrypt, CTR, etc.;
# (**)  these results are worse than scalar compiler-generated
#       code, but it's constant-time and therefore preferred;
# (***) presented for reference/comparison purposes;

$flavour = shift;
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

$code.=<<___;
#include <openssl/arm_arch.h>

.section .rodata

.type	_vpaes_consts,%object
.align	7	// totally strategic alignment
_vpaes_consts:
.Lk_mc_forward:	// mc_forward
	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
	.quad	0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:// mc_backward
	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
	.quad	0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:		// sr
	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
	.quad	0x0F060D040B020900, 0x070E050C030A0108
	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
.Lk_inv:	// inv, inva
	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
	.quad	0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt:	// input transform (lo, hi)
	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo:	// sbou, sbot
	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1:	// sb1u, sb1t
	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2:	// sb2u, sb2t
	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
// Decryption stuff
//
.Lk_dipt:	// decryption input transform
	.quad	0x0F505B040B545F00, 0x154A411E114E451A
	.quad	0x86E383E660056500, 0x12771772F491F194
.Lk_dsbo:	// decryption sbox final output
	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.Lk_dsb9:	// decryption sbox output *9*u, *9*t
	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:	// decryption sbox output *D*u, *D*t
	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:	// decryption sbox output *B*u, *B*t
	.quad	0xD022649296B44200, 0x602646F6B0F2D404
	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:	// decryption sbox output *E*u, *E*t
	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32

//
// Key schedule constants
//
.Lk_dksd:	// decryption key schedule: invskew x*D
	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:	// decryption key schedule: invskew x*B
	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:	// decryption key schedule: invskew x*E + 0x63
	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:	// decryption key schedule: invskew x*9
	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE

.Lk_rcon:	// rcon
	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_opt:	// output transform
	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:	// deskew tables: inverts the sbox's "skew"
	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

.asciz	"Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
.size	_vpaes_consts,.-_vpaes_consts
.align	6

.text
___

{
my ($inp,$out,$key) = map("x$_",(0..2));

my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));

$code.=<<___;
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.type	_vpaes_encrypt_preheat,%function
.align	4
_vpaes_encrypt_preheat:
	adrp	x10, :pg_hi21:.Lk_inv
	add	x10, x10, :lo12:.Lk_inv
	movi	v17.16b, #0x0f
	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
	ld1	{v20.2d-v23.2d}, [x10],#64	// .Lk_ipt, .Lk_sbo
	ld1	{v24.2d-v27.2d}, [x10]		// .Lk_sb1, .Lk_sb2
	ret
.size	_vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat

##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
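##
## (Throughout this file the "//" comments carry the x86_64 SSSE3
## instruction each AArch64 instruction corresponds to, so register names
## such as %xmm0-%xmm15 and %r9-%r11 refer to that original code, not to
## anything defined here.)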
##
## Inputs:
##   %xmm0 = input
##   %xmm9-%xmm15 as in _vpaes_preheat
##   (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
.type	_vpaes_encrypt_core,%function
.align	4
_vpaes_encrypt_core:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds
	adrp	x11, :pg_hi21:.Lk_mc_forward+16
	add	x11, x11, :lo12:.Lk_mc_forward+16
						// vmovdqa	.Lk_ipt(%rip), %xmm2	# iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9), %xmm5		# round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0
	tbl	v1.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1, %xmm2, %xmm1
						// vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
	tbl	v2.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0, %xmm3, %xmm2
	eor	v0.16b, v1.16b, v16.16b		// vpxor	%xmm5, %xmm1, %xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2, %xmm0, %xmm0
	b	.Lenc_entry

.align 4
.Lenc_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {$sb1t}, v2.16b		// vpshufb	%xmm2, %xmm13, %xmm4	# 4 = sb1u
	ld1	{v1.2d}, [x11], #16		// vmovdqa	-0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
	tbl	v0.16b, {$sb1u}, v3.16b		// vpshufb	%xmm3, %xmm12, %xmm0	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	tbl	v5.16b, {$sb2t}, v2.16b		// vpshufb	%xmm2, %xmm15, %xmm5	# 4 = sb2u
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 0 = A
	tbl	v2.16b, {$sb2u}, v3.16b		// vpshufb	%xmm3, %xmm14, %xmm2	# 2 = sb2t
	ld1	{v4.2d}, [x10]			// vmovdqa	(%r11,%r10), %xmm4	# .Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1, %xmm0, %xmm3	# 0 = B
	eor	v2.16b, v2.16b, v5.16b		// vpxor	%xmm5, %xmm2, %xmm2	# 2 = 2A
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb	%xmm4, %xmm0, %xmm0	# 3 = D
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3	# 0 = 2A+B
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1, %xmm3, %xmm4	# 0 = 2B+C
	eor	v0.16b, v0.16b, v3.16b		// vpxor	%xmm3, %xmm0, %xmm0	# 3 = 2A+B+D
	and	x11, x11, #~(1<<6)		// and	\$0x30, %r11		# ... mod 4
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 0 = 2A+3B+C+D
	sub	w8, w8, #1			// nr--

.Lenc_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm0, %xmm9, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0	# 1 = i
	tbl	v5.16b, {$invhi}, v1.16b	// vpshufb	%xmm1, %xmm11, %xmm5	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0, %xmm1, %xmm1	# 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb	%xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb	%xmm1, %xmm10, %xmm4	# 4 = 1/j
	eor	v3.16b, v3.16b, v5.16b		// vpxor	%xmm5, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v5.16b		// vpxor	%xmm5, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb	%xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb	%xmm4, %xmm10, %xmm3	# 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1, %xmm2, %xmm2	# 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0, %xmm3, %xmm3	# 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm5
	cbnz	w8, .Lenc_loop

	// middle of last round
	add	x10, x11, #0x80
						// vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
						// vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbou
	ld1	{v1.2d}, [x10]			// vmovdqa	0x40(%r11,%r10), %xmm1	# .Lk_sr[]
	tbl	v0.16b, {$sbot}, v3.16b		// vpshufb	%xmm3, %xmm0, %xmm0	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 0 = A
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1, %xmm0, %xmm0
	ret
.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core

.globl	vpaes_encrypt
.type	vpaes_encrypt,%function
.align	4
vpaes_encrypt:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [$inp]
	bl	_vpaes_encrypt_preheat
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out]

	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	vpaes_encrypt,.-vpaes_encrypt

.type	_vpaes_encrypt_2x,%function
.align	4
_vpaes_encrypt_2x:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds
	adrp	x11, :pg_hi21:.Lk_mc_forward+16
	add	x11, x11, :lo12:.Lk_mc_forward+16
						// vmovdqa	.Lk_ipt(%rip), %xmm2	# iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9), %xmm5		# round0 key
	and	v1.16b, v14.16b, v17.16b	// vpand	%xmm9, %xmm0, %xmm1
	ushr	v0.16b, v14.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0
	and	v9.16b, v15.16b, v17.16b
	ushr	v8.16b, v15.16b, #4
	tbl	v1.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1, %xmm2, %xmm1
	tbl	v9.16b, {$iptlo}, v9.16b
						// vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
	tbl	v2.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0, %xmm3, %xmm2
	tbl	v10.16b, {$ipthi}, v8.16b
	eor	v0.16b, v1.16b, v16.16b		// vpxor	%xmm5, %xmm1, %xmm0
	eor	v8.16b, v9.16b, v16.16b
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2, %xmm0, %xmm0
	eor	v8.16b, v8.16b, v10.16b
	b	.Lenc_2x_entry

.align 4
.Lenc_2x_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {$sb1t}, v2.16b		// vpshufb	%xmm2, %xmm13, %xmm4	# 4 = sb1u
	tbl	v12.16b, {$sb1t}, v10.16b
	ld1	{v1.2d}, [x11], #16		// vmovdqa	-0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
	tbl	v0.16b, {$sb1u}, v3.16b		// vpshufb	%xmm3, %xmm12, %xmm0	# 0 = sb1t
	tbl	v8.16b, {$sb1u}, v11.16b
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	tbl	v5.16b, {$sb2t}, v2.16b		// vpshufb	%xmm2, %xmm15, %xmm5	# 4 = sb2u
	tbl	v13.16b, {$sb2t}, v10.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 0 = A
	eor	v8.16b, v8.16b, v12.16b
	tbl	v2.16b, {$sb2u}, v3.16b		// vpshufb	%xmm3, %xmm14, %xmm2	# 2 = sb2t
	tbl	v10.16b, {$sb2u}, v11.16b
	ld1	{v4.2d}, [x10]			// vmovdqa	(%r11,%r10), %xmm4	# .Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1, %xmm0, %xmm3	# 0 = B
	tbl	v11.16b, {v8.16b}, v1.16b
	eor	v2.16b, v2.16b, v5.16b		// vpxor	%xmm5, %xmm2, %xmm2	# 2 = 2A
	eor	v10.16b, v10.16b, v13.16b
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb	%xmm4, %xmm0, %xmm0	# 3 = D
	tbl	v8.16b, {v8.16b}, v4.16b
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3	# 0 = 2A+B
	eor	v11.16b, v11.16b, v10.16b
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1, %xmm3, %xmm4	# 0 = 2B+C
	tbl	v12.16b, {v11.16b},v1.16b
	eor	v0.16b, v0.16b, v3.16b		// vpxor	%xmm3, %xmm0, %xmm0	# 3 = 2A+B+D
	eor	v8.16b, v8.16b, v11.16b
	and	x11, x11, #~(1<<6)		// and	\$0x30, %r11		# ... mod 4
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 0 = 2A+3B+C+D
	eor	v8.16b, v8.16b, v12.16b
	sub	w8, w8, #1			// nr--

.Lenc_2x_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm0, %xmm9, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0	# 1 = i
	and	v9.16b, v8.16b, v17.16b
	ushr	v8.16b, v8.16b, #4
	tbl	v5.16b, {$invhi},v1.16b		// vpshufb	%xmm1, %xmm11, %xmm5	# 2 = a/k
	tbl	v13.16b, {$invhi},v9.16b
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0, %xmm1, %xmm1	# 0 = j
	eor	v9.16b, v9.16b, v8.16b
	tbl	v3.16b, {$invlo},v0.16b		// vpshufb	%xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v11.16b, {$invlo},v8.16b
	tbl	v4.16b, {$invlo},v1.16b		// vpshufb	%xmm1, %xmm10, %xmm4	# 4 = 1/j
	tbl	v12.16b, {$invlo},v9.16b
	eor	v3.16b, v3.16b, v5.16b		// vpxor	%xmm5, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v13.16b
	eor	v4.16b, v4.16b, v5.16b		// vpxor	%xmm5, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v13.16b
	tbl	v2.16b, {$invlo},v3.16b		// vpshufb	%xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v10.16b, {$invlo},v11.16b
	tbl	v3.16b, {$invlo},v4.16b		// vpshufb	%xmm4, %xmm10, %xmm3	# 3 = 1/jak
	tbl	v11.16b, {$invlo},v12.16b
	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1, %xmm2, %xmm2	# 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0, %xmm3, %xmm3	# 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm5
	cbnz	w8, .Lenc_2x_loop

	// middle of last round
	add	x10, x11, #0x80
						// vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
						// vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbou
	tbl	v12.16b, {$sbou}, v10.16b
	ld1	{v1.2d}, [x10]			// vmovdqa	0x40(%r11,%r10), %xmm1	# .Lk_sr[]
	tbl	v0.16b, {$sbot}, v3.16b		// vpshufb	%xmm3, %xmm0, %xmm0	# 0 = sb1t
	tbl	v8.16b, {$sbot}, v11.16b
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 0 = A
	eor	v8.16b, v8.16b, v12.16b
	tbl	v0.16b, {v0.16b},v1.16b		// vpshufb	%xmm1, %xmm0, %xmm0
	tbl	v1.16b, {v8.16b},v1.16b
	ret
.size	_vpaes_encrypt_2x,.-_vpaes_encrypt_2x

.type	_vpaes_decrypt_preheat,%function
.align	4
_vpaes_decrypt_preheat:
	adrp	x10, :pg_hi21:.Lk_inv
	add	x10, x10, :lo12:.Lk_inv
	movi	v17.16b, #0x0f
	adrp	x11, :pg_hi21:.Lk_dipt
	add	x11, x11, :lo12:.Lk_dipt
	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
	ld1	{v20.2d-v23.2d}, [x11],#64	// .Lk_dipt, .Lk_dsbo
	ld1	{v24.2d-v27.2d}, [x11],#64	// .Lk_dsb9, .Lk_dsbd
	ld1	{v28.2d-v31.2d}, [x11]		// .Lk_dsbb, .Lk_dsbe
	ret
.size	_vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat

##
## Decryption core
##
## Same API as encryption core.
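##
## (For reference: .Lk_dsb9/.Lk_dsbd/.Lk_dsbb/.Lk_dsbe used below hold the
## inverse S-box output pre-multiplied by the InvMixColumns coefficients
## 9, 0xD, 0xB and 0xE, so each pass of .Ldec_loop accumulates
## "InvSubBytes then InvMixColumns" one coefficient at a time, with a byte
## rotation (the tbl by %xmm5/.Lk_mc_forward+48) in between.)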
##
.type	_vpaes_decrypt_core,%function
.align	4
_vpaes_decrypt_core:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds

						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
	lsl	x11, x8, #4			// mov	%rax, %r11; shl	\$4, %r11
	eor	x11, x11, #0x30			// xor	\$0x30, %r11
	adrp	x10, :pg_hi21:.Lk_sr
	add	x10, x10, :lo12:.Lk_sr
	and	x11, x11, #0x30			// and	\$0x30, %r11
	add	x11, x11, x10
	adrp	x10, :pg_hi21:.Lk_mc_forward+48
	add	x10, x10, :lo12:.Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm4	# round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0
	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1, %xmm2, %xmm2
	ld1	{v5.2d}, [x10]			// vmovdqa	.Lk_mc_forward+48(%rip), %xmm5
						// vmovdqa	.Lk_dipt+16(%rip), %xmm1	# ipthi
	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0, %xmm1, %xmm0
	eor	v2.16b, v2.16b, v16.16b		// vpxor	%xmm4, %xmm2, %xmm2
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2, %xmm0, %xmm0
	b	.Ldec_entry

.align 4
.Ldec_loop:
//
//  Inverse mix columns
//
						// vmovdqa	-0x20(%r10),%xmm4	# 4 : sb9u
						// vmovdqa	-0x10(%r10),%xmm1	# 0 : sb9t
	tbl	v4.16b, {$sb9u}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sb9u
	tbl	v1.16b, {$sb9t}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sb9t
	eor	v0.16b, v4.16b, v16.16b		// vpxor	%xmm4, %xmm0, %xmm0
						// vmovdqa	0x00(%r10),%xmm4	# 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
						// vmovdqa	0x10(%r10),%xmm1	# 0 : sbdt

	tbl	v4.16b, {$sbdu}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbdu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v1.16b, {$sbdt}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbdt
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
						// vmovdqa	0x20(%r10), %xmm4	# 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
						// vmovdqa	0x30(%r10), %xmm1	# 0 : sbbt

	tbl	v4.16b, {$sbbu}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbbu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v1.16b, {$sbbt}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbbt
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
						// vmovdqa	0x40(%r10), %xmm4	# 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
						// vmovdqa	0x50(%r10), %xmm1	# 0 : sbet

	tbl	v4.16b, {$sbeu}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbeu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v1.16b, {$sbet}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbet
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr	\$12, %xmm5, %xmm5, %xmm5
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	sub	w8, w8, #1			// sub	\$1,%rax			# nr--

.Ldec_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9, %xmm0, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0	# 1 = i
	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb	%xmm1, %xmm11, %xmm2	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0, %xmm1, %xmm1	# 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb	%xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb	%xmm1, %xmm10, %xmm4	# 4 = 1/j
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v2.16b		// vpxor	%xmm2, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb	%xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb	%xmm4, %xmm10, %xmm3	# 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1, %xmm2, %xmm2	# 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0, %xmm3, %xmm3	# 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm0
	cbnz	w8, .Ldec_loop

	// middle of last round
						// vmovdqa	0x60(%r10), %xmm4	# 3 : sbou
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbou
						// vmovdqa	0x70(%r10), %xmm1	# 0 : sbot
	ld1	{v2.2d}, [x11]			// vmovdqa	-0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
	tbl	v1.16b, {$sbot}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm0, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v0.16b, v1.16b, v4.16b		// vpxor	%xmm4, %xmm1, %xmm0	# 0 = A
	tbl	v0.16b, {v0.16b}, v2.16b	// vpshufb	%xmm2, %xmm0, %xmm0
	ret
.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core

.globl	vpaes_decrypt
.type	vpaes_decrypt,%function
.align	4
vpaes_decrypt:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [$inp]
	bl	_vpaes_decrypt_preheat
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [$out]

	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	vpaes_decrypt,.-vpaes_decrypt

// v14-v15 input, v0-v1 output
.type	_vpaes_decrypt_2x,%function
.align	4
_vpaes_decrypt_2x:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds

						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
	lsl	x11, x8, #4			// mov	%rax, %r11; shl	\$4, %r11
	eor	x11, x11, #0x30			// xor	\$0x30, %r11
	adrp	x10, :pg_hi21:.Lk_sr
	add	x10, x10, :lo12:.Lk_sr
	and	x11, x11, #0x30			// and	\$0x30, %r11
	add	x11, x11, x10
	adrp	x10, :pg_hi21:.Lk_mc_forward+48
	add	x10, x10, :lo12:.Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm4	# round0 key
	and	v1.16b, v14.16b, v17.16b	// vpand	%xmm9, %xmm0, %xmm1
	ushr	v0.16b, v14.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0
	and	v9.16b, v15.16b, v17.16b
	ushr	v8.16b, v15.16b, #4
	tbl	v2.16b, {$iptlo},v1.16b		// vpshufb	%xmm1, %xmm2, %xmm2
	tbl	v10.16b, {$iptlo},v9.16b
	ld1	{v5.2d}, [x10]			// vmovdqa	.Lk_mc_forward+48(%rip), %xmm5
						// vmovdqa	.Lk_dipt+16(%rip), %xmm1	# ipthi
	tbl	v0.16b, {$ipthi},v0.16b		// vpshufb	%xmm0, %xmm1, %xmm0
	tbl	v8.16b, {$ipthi},v8.16b
	eor	v2.16b, v2.16b, v16.16b		// vpxor	%xmm4, %xmm2, %xmm2
	eor	v10.16b, v10.16b, v16.16b
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2, %xmm0, %xmm0
	eor	v8.16b, v8.16b, v10.16b
	b	.Ldec_2x_entry

.align 4
.Ldec_2x_loop:
//
//  Inverse mix columns
//
						// vmovdqa	-0x20(%r10),%xmm4	# 4 : sb9u
						// vmovdqa	-0x10(%r10),%xmm1	# 0 : sb9t
	tbl	v4.16b, {$sb9u}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sb9u
	tbl	v12.16b, {$sb9u}, v10.16b
	tbl	v1.16b, {$sb9t}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sb9t
	tbl	v9.16b, {$sb9t}, v11.16b
	eor	v0.16b, v4.16b, v16.16b		// vpxor	%xmm4, %xmm0, %xmm0
	eor	v8.16b, v12.16b, v16.16b
						// vmovdqa	0x00(%r10),%xmm4	# 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	eor	v8.16b, v8.16b, v9.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
						// vmovdqa	0x10(%r10),%xmm1	# 0 : sbdt

	tbl	v4.16b, {$sbdu}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbdu
	tbl	v12.16b, {$sbdu}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b		// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {$sbdt}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbdt
	tbl	v9.16b, {$sbdt}, v11.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
	eor	v8.16b, v8.16b, v12.16b
						// vmovdqa	0x20(%r10), %xmm4	# 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	eor	v8.16b, v8.16b, v9.16b
						// vmovdqa	0x30(%r10), %xmm1	# 0 : sbbt

	tbl	v4.16b, {$sbbu}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbbu
	tbl	v12.16b, {$sbbu}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b		// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {$sbbt}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbbt
	tbl	v9.16b, {$sbbt}, v11.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
	eor	v8.16b, v8.16b, v12.16b
						// vmovdqa	0x40(%r10), %xmm4	# 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	eor	v8.16b, v8.16b, v9.16b
						// vmovdqa	0x50(%r10), %xmm1	# 0 : sbet

	tbl	v4.16b, {$sbeu}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbeu
	tbl	v12.16b, {$sbeu}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b		// vpshufb	%xmm5, %xmm0, %xmm0	# MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {$sbet}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sbet
	tbl	v9.16b, {$sbet}, v11.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4, %xmm0, %xmm0	# 4 = ch
	eor	v8.16b, v8.16b, v12.16b
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr	\$12, %xmm5, %xmm5, %xmm5
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1, %xmm0, %xmm0	# 0 = ch
	eor	v8.16b, v8.16b, v9.16b
	sub	w8, w8, #1			// sub	\$1,%rax			# nr--

.Ldec_2x_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9, %xmm0, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4, %xmm0, %xmm0	# 1 = i
	and	v9.16b, v8.16b, v17.16b
	ushr	v8.16b, v8.16b, #4
	tbl	v2.16b, {$invhi},v1.16b		// vpshufb	%xmm1, %xmm11, %xmm2	# 2 = a/k
	tbl	v10.16b, {$invhi},v9.16b
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0, %xmm1, %xmm1	# 0 = j
	eor	v9.16b, v9.16b, v8.16b
	tbl	v3.16b, {$invlo},v0.16b		// vpshufb	%xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v11.16b, {$invlo},v8.16b
	tbl	v4.16b, {$invlo},v1.16b		// vpshufb	%xmm1, %xmm10, %xmm4	# 4 = 1/j
	tbl	v12.16b, {$invlo},v9.16b
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v10.16b
	eor	v4.16b, v4.16b, v2.16b		// vpxor	%xmm2, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v10.16b
	tbl	v2.16b, {$invlo},v3.16b		// vpshufb	%xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v10.16b, {$invlo},v11.16b
	tbl	v3.16b, {$invlo},v4.16b		// vpshufb	%xmm4, %xmm10, %xmm3	# 3 = 1/jak
	tbl	v11.16b, {$invlo},v12.16b
	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1, %xmm2, %xmm2	# 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0, %xmm3, %xmm3	# 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9), %xmm0
	cbnz	w8, .Ldec_2x_loop

	// middle of last round
						// vmovdqa	0x60(%r10), %xmm4	# 3 : sbou
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb	%xmm2, %xmm4, %xmm4	# 4 = sbou
	tbl	v12.16b, {$sbou}, v10.16b
						// vmovdqa	0x70(%r10), %xmm1	# 0 : sbot
	tbl	v1.16b, {$sbot}, v3.16b		// vpshufb	%xmm3, %xmm1, %xmm1	# 0 = sb1t
	tbl	v9.16b, {$sbot}, v11.16b
	ld1	{v2.2d}, [x11]			// vmovdqa	-0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm0, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b, v1.16b, v4.16b		// vpxor	%xmm4, %xmm1, %xmm0	# 0 = A
	eor	v8.16b, v9.16b, v12.16b
	tbl	v0.16b, {v0.16b},v2.16b		// vpshufb	%xmm2, %xmm0, %xmm0
	tbl	v1.16b, {v8.16b},v2.16b
	ret
.size	_vpaes_decrypt_2x,.-_vpaes_decrypt_2x
___
}
{
my ($inp,$bits,$out,$dir)=("x0","w1","x2","w3");
my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_.16b",(18..21,8));

$code.=<<___;
########################################################
##                                                    ##
##                  AES key schedule                  ##
##                                                    ##
########################################################
.type	_vpaes_key_preheat,%function
.align	4
_vpaes_key_preheat:
	adrp	x10, :pg_hi21:.Lk_inv
	add	x10, x10, :lo12:.Lk_inv
	movi	v16.16b, #0x5b			// .Lk_s63
	adrp	x11, :pg_hi21:.Lk_sb1
	add	x11, x11, :lo12:.Lk_sb1
	movi	v17.16b, #0x0f			// .Lk_s0F
	ld1	{v18.2d-v21.2d}, [x10]		// .Lk_inv, .Lk_ipt
	adrp	x10, :pg_hi21:.Lk_dksd
	add	x10, x10, :lo12:.Lk_dksd
	ld1	{v22.2d-v23.2d}, [x11]		// .Lk_sb1
	adrp	x11, :pg_hi21:.Lk_mc_forward
	add	x11, x11, :lo12:.Lk_mc_forward
	ld1	{v24.2d-v27.2d}, [x10],#64	// .Lk_dksd, .Lk_dksb
	ld1	{v28.2d-v31.2d}, [x10],#64	// .Lk_dkse, .Lk_dks9
	ld1	{v8.2d}, [x10]			// .Lk_rcon
	ld1	{v9.2d}, [x11]			// .Lk_mc_forward[0]
	ret
.size	_vpaes_key_preheat,.-_vpaes_key_preheat

.type	_vpaes_schedule_core,%function
.align	4
_vpaes_schedule_core:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29, x30, [sp,#-16]!
	add	x29,sp,#0

	bl	_vpaes_key_preheat		// load the tables

	ld1	{v0.16b}, [$inp],#16		// vmovdqu	(%rdi),	%xmm0		# load key (unaligned)

	// input transform
	mov	v3.16b, v0.16b			// vmovdqa	%xmm0,	%xmm3
	bl	_vpaes_schedule_transform
	mov	v7.16b, v0.16b			// vmovdqa	%xmm0,	%xmm7

	adrp	x10, :pg_hi21:.Lk_sr		// lea	.Lk_sr(%rip),%r10
	add	x10, x10, :lo12:.Lk_sr

	add	x8, x8, x10
	cbnz	$dir, .Lschedule_am_decrypting

	// encrypting, output zeroth round key after transform
	st1	{v0.2d}, [$out]			// vmovdqu	%xmm0,	(%rdx)
	b	.Lschedule_go

.Lschedule_am_decrypting:
	// decrypting, output zeroth round key after shiftrows
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),	%xmm1
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
	st1	{v3.2d}, [$out]			// vmovdqu	%xmm3,	(%rdx)
	eor	x8, x8, #0x30			// xor	\$0x30, %r8

.Lschedule_go:
	cmp	$bits, #192			// cmp	\$192,	%esi
	b.hi	.Lschedule_256
	b.eq	.Lschedule_192
	// 128: fall through

##
##  .schedule_128
##
##  128-bit specific part of key schedule.
##
##  This schedule is really simple, because all its parts
##  are accomplished by the subroutines.
##
.Lschedule_128:
	mov	$inp, #10			// mov	\$10, %esi

.Loop_schedule_128:
	sub	$inp, $inp, #1			// dec	%esi
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		// write output
	b	.Loop_schedule_128

##
##  .aes_schedule_192
##
##  192-bit specific part of key schedule.
##
##  The main body of this schedule is the same as the 128-bit
##  schedule, but with more smearing.  The long, high side is
##  stored in %xmm7 as before, and the short, low side is in
##  the high bits of %xmm6.
##
##  This schedule is somewhat nastier, however, because each
##  round produces 192 bits of key material, or 1.5 round keys.
##  Therefore, on each cycle we do 2 rounds and produce 3 round
##  keys.
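##
##  (Arithmetic sketch: 192 bits is 1.5 round keys, so two schedule rounds
##  yield three 128-bit round keys.  AES-192 needs 13 round keys in all:
##  the zeroth is written before .Lschedule_go, and the loop below, with a
##  count of 4, supplies the remaining twelve, the last of them via
##  .Lschedule_mangle_last.)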
##
.align	4
.Lschedule_192:
	sub	$inp, $inp, #8
	ld1	{v0.16b}, [$inp]		// vmovdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	v6.16b, v0.16b			// vmovdqa	%xmm0,	%xmm6		# save short part
	eor	v4.16b, v4.16b, v4.16b		// vpxor	%xmm4,	%xmm4,	%xmm4	# clear 4
	ins	v6.d[0], v4.d[0]		// vmovhlps	%xmm4,	%xmm6,	%xmm6	# clobber low side with zeros
	mov	$inp, #4			// mov	\$4,	%esi

.Loop_schedule_192:
	sub	$inp, $inp, #1			// dec	%esi
	bl	_vpaes_schedule_round
	ext	v0.16b, v6.16b, v0.16b, #8	// vpalignr	\$8,%xmm6,%xmm0,%xmm0
	bl	_vpaes_schedule_mangle		// save key n
	bl	_vpaes_schedule_192_smear
	bl	_vpaes_schedule_mangle		// save key n+1
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		// save key n+2
	bl	_vpaes_schedule_192_smear
	b	.Loop_schedule_192

##
##  .aes_schedule_256
##
##  256-bit specific part of key schedule.
##
##  The structure here is very similar to the 128-bit
##  schedule, but with an additional "low side" in
##  %xmm6.  The low side's rounds are the same as the
##  high side's, except no rcon and no rotation.
##
.align	4
.Lschedule_256:
	ld1	{v0.16b}, [$inp]		// vmovdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	$inp, #7			// mov	\$7, %esi

.Loop_schedule_256:
	sub	$inp, $inp, #1			// dec	%esi
	bl	_vpaes_schedule_mangle		// output low result
	mov	v6.16b, v0.16b			// vmovdqa	%xmm0,	%xmm6		# save cur_lo in xmm6

	// high round
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle

	// low round. swap xmm7 and xmm6
	dup	v0.4s, v0.s[3]			// vpshufd	\$0xFF,	%xmm0,	%xmm0
	movi	v4.16b, #0
	mov	v5.16b, v7.16b			// vmovdqa	%xmm7,	%xmm5
	mov	v7.16b, v6.16b			// vmovdqa	%xmm6,	%xmm7
	bl	_vpaes_schedule_low_round
	mov	v7.16b, v5.16b			// vmovdqa	%xmm5,	%xmm7

	b	.Loop_schedule_256

##
##  .aes_schedule_mangle_last
##
##  Mangler for last round of key schedule
##  Mangles %xmm0
##    when encrypting, outputs out(%xmm0) ^ 63
##    when decrypting, outputs unskew(%xmm0)
##
##  Always called right before return... jumps to cleanup and exits
##
.align	4
.Lschedule_mangle_last:
	// schedule last round key from xmm0
	adrp	x11, :pg_hi21:.Lk_deskew	// lea	.Lk_deskew(%rip),%r11	# prepare to deskew
	add	x11, x11, :lo12:.Lk_deskew

	cbnz	$dir, .Lschedule_mangle_last_dec

	// encrypting
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),%xmm1
	adrp	x11, :pg_hi21:.Lk_opt		// lea	.Lk_opt(%rip),	%r11	# prepare to output transform
	add	x11, x11, :lo12:.Lk_opt
	add	$out, $out, #32			// add	\$32,	%rdx
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm0	# output permute

.Lschedule_mangle_last_dec:
	ld1	{v20.2d-v21.2d}, [x11]		// reload constants
	sub	$out, $out, #16			// add	\$-16,	%rdx
	eor	v0.16b, v0.16b, v16.16b		// vpxor	.Lk_s63(%rip),	%xmm0,	%xmm0
	bl	_vpaes_schedule_transform	// output transform
	st1	{v0.2d}, [$out]			// vmovdqu	%xmm0,	(%rdx)		# save last key

	// cleanup
	eor	v0.16b, v0.16b, v0.16b		// vpxor	%xmm0,	%xmm0,	%xmm0
	eor	v1.16b, v1.16b, v1.16b		// vpxor	%xmm1,	%xmm1,	%xmm1
	eor	v2.16b, v2.16b, v2.16b		// vpxor	%xmm2,	%xmm2,	%xmm2
	eor	v3.16b, v3.16b, v3.16b		// vpxor	%xmm3,	%xmm3,	%xmm3
	eor	v4.16b, v4.16b, v4.16b		// vpxor	%xmm4,	%xmm4,	%xmm4
	eor	v5.16b, v5.16b, v5.16b		// vpxor	%xmm5,	%xmm5,	%xmm5
	eor	v6.16b, v6.16b, v6.16b		// vpxor	%xmm6,	%xmm6,	%xmm6
	eor	v7.16b, v7.16b, v7.16b		// vpxor	%xmm7,	%xmm7,	%xmm7
	ldp	x29, x30, [sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	_vpaes_schedule_core,.-_vpaes_schedule_core

##
##  .aes_schedule_192_smear
##
##  Smear the short, low side in the 192-bit key schedule.
##
##  Inputs:
##    %xmm7: high side, b  a  x  y
##    %xmm6: low side, d  c  0  0
##    %xmm13: 0
##
##  Outputs:
##    %xmm6: b+c+d  b+c  0  0
##    %xmm0: b+c+d  b+c  b  a
##
.type	_vpaes_schedule_192_smear,%function
.align	4
_vpaes_schedule_192_smear:
	movi	v1.16b, #0
	dup	v0.4s, v7.s[3]
	ins	v1.s[3], v6.s[2]	// vpshufd	\$0x80,	%xmm6,	%xmm1	# d c 0 0 -> c 0 0 0
	ins	v0.s[0], v7.s[2]	// vpshufd	\$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
	eor	v6.16b, v6.16b, v1.16b	// vpxor	%xmm1,	%xmm6,	%xmm6	# -> c+d c 0 0
	eor	v1.16b, v1.16b, v1.16b	// vpxor	%xmm1,	%xmm1,	%xmm1
	eor	v6.16b, v6.16b, v0.16b	// vpxor	%xmm0,	%xmm6,	%xmm6	# -> b+c+d b+c b a
	mov	v0.16b, v6.16b		// vmovdqa	%xmm6,	%xmm0
	ins	v6.d[0], v1.d[0]	// vmovhlps	%xmm1,	%xmm6,	%xmm6	# clobber low side with zeros
	ret
.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear

##
##  .aes_schedule_round
##
##  Runs one main round of the key schedule on %xmm0, %xmm7
##
##  Specifically, runs subbytes on the high dword of %xmm0
##  then rotates it by one byte and xors into the low dword of
##  %xmm7.
##
##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
##  next rcon.
##
##  Smears the dwords of %xmm7 by xoring the low into the
##  second low, result into third, result into highest.
##
##  Returns results in %xmm7 = %xmm0.
##  Clobbers %xmm1-%xmm4, %r11.
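##
##  (For orientation, a sketch of the textbook expansion step this
##  corresponds to, per FIPS-197, with Nk words per key:
##      temp = SubWord(RotWord(w[i-1])) xor Rcon[i/Nk]
##      w[i] = w[i-Nk] xor temp
##  the remaining words of each round key then come from the "smear",
##  i.e. successive xors with the previous word.)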
##
.type	_vpaes_schedule_round,%function
.align	4
_vpaes_schedule_round:
	// extract rcon from xmm8
	movi	v4.16b, #0			// vpxor	%xmm4,	%xmm4,	%xmm4
	ext	v1.16b, $rcon, v4.16b, #15	// vpalignr	\$15,	%xmm8,	%xmm4,	%xmm1
	ext	$rcon, $rcon, $rcon, #15	// vpalignr	\$15,	%xmm8,	%xmm8,	%xmm8
	eor	v7.16b, v7.16b, v1.16b		// vpxor	%xmm1,	%xmm7,	%xmm7

	// rotate
	dup	v0.4s, v0.s[3]			// vpshufd	\$0xFF,	%xmm0,	%xmm0
	ext	v0.16b, v0.16b, v0.16b, #1	// vpalignr	\$1,	%xmm0,	%xmm0,	%xmm0

	// fall through...

	// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
	// smear xmm7
	ext	v1.16b, v4.16b, v7.16b, #12	// vpslldq	\$4,	%xmm7,	%xmm1
	eor	v7.16b, v7.16b, v1.16b		// vpxor	%xmm1,	%xmm7,	%xmm7
	ext	v4.16b, v4.16b, v7.16b, #8	// vpslldq	\$8,	%xmm7,	%xmm4

	// subbytes
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0	# 1 = i
	eor	v7.16b, v7.16b, v4.16b		// vpxor	%xmm4,	%xmm7,	%xmm7
	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm2	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb	%xmm0,	%xmm10,	%xmm3	# 3 = 1/i
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb	%xmm1,	%xmm10,	%xmm4	# 4 = 1/j
	eor	v7.16b, v7.16b, v16.16b		// vpxor	.Lk_s63(%rip),	%xmm7,	%xmm7
	tbl	v3.16b, {$invlo}, v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm3	# 2 = 1/iak
	eor	v4.16b, v4.16b, v2.16b		// vpxor	%xmm2,	%xmm4,	%xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v4.16b	// vpshufb	%xmm4,	%xmm10,	%xmm2	# 3 = 1/jak
	eor	v3.16b, v3.16b, v1.16b		// vpxor	%xmm1,	%xmm3,	%xmm3	# 2 = io
	eor	v2.16b, v2.16b, v0.16b		// vpxor	%xmm0,	%xmm2,	%xmm2	# 3 = jo
	tbl	v4.16b, {v23.16b}, v3.16b	// vpshufb	%xmm3,	%xmm13,	%xmm4	# 4 = sbou
	tbl	v1.16b, {v22.16b}, v2.16b	// vpshufb	%xmm2,	%xmm12,	%xmm1	# 0 = sb1t
	eor	v1.16b, v1.16b, v4.16b		// vpxor	%xmm4,	%xmm1,	%xmm1	# 0 = sbox output

	// add in smeared stuff
	eor	v0.16b, v1.16b, v7.16b		// vpxor	%xmm7,	%xmm1,	%xmm0
	eor	v7.16b, v1.16b, v7.16b		// vmovdqa	%xmm0,	%xmm7
	ret
.size	_vpaes_schedule_round,.-_vpaes_schedule_round

##
##  .aes_schedule_transform
##
##  Linear-transform %xmm0 according to tables at (%r11)
##
##  Requires that %xmm9 = 0x0F0F... as in preheat
##  Output in %xmm0
##  Clobbers %xmm1, %xmm2
##
.type	_vpaes_schedule_transform,%function
.align	4
_vpaes_schedule_transform:
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1
	ushr	v0.16b, v0.16b, #4		// vpsrlb	\$4,	%xmm0,	%xmm0
						// vmovdqa	(%r11),	%xmm2	# lo
	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm2
						// vmovdqa	16(%r11), %xmm1 # hi
	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb	%xmm0,	%xmm1,	%xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2,	%xmm0,	%xmm0
	ret
.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform

##
##  .aes_schedule_mangle
##
##  Mangle xmm0 from (basis-transformed) standard version
##  to our version.
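##
##  (The "multiply by circulant 0,1,1,1" mentioned below is just the xor of
##  the three non-trivial byte rotations of the round key; that is what the
##  chain of tbl-by-.Lk_mc_forward and eor instructions in the encrypting
##  path computes.)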
##
##  On encrypt,
##    xor with 0x63
##    multiply by circulant 0,1,1,1
##    apply shiftrows transform
##
##  On decrypt,
##    xor with 0x63
##    multiply by "inverse mixcolumns" circulant E,B,D,9
##    deskew
##    apply shiftrows transform
##
##
##  Writes out to (%rdx), and increments or decrements it
##  Keeps track of round number mod 4 in %r8
##  Preserves xmm0
##  Clobbers xmm1-xmm5
##
.type	_vpaes_schedule_mangle,%function
.align	4
_vpaes_schedule_mangle:
	mov	v4.16b, v0.16b			// vmovdqa	%xmm0,	%xmm4	# save xmm0 for later
						// vmovdqa	.Lk_mc_forward(%rip),%xmm5
	cbnz	$dir, .Lschedule_mangle_dec

	// encrypting
	eor	v4.16b, v0.16b, v16.16b		// vpxor	.Lk_s63(%rip),	%xmm0,	%xmm4
	add	$out, $out, #16			// add	\$16,	%rdx
	tbl	v4.16b, {v4.16b}, v9.16b	// vpshufb	%xmm5,	%xmm4,	%xmm4
	tbl	v1.16b, {v4.16b}, v9.16b	// vpshufb	%xmm5,	%xmm4,	%xmm1
	tbl	v3.16b, {v1.16b}, v9.16b	// vpshufb	%xmm5,	%xmm1,	%xmm3
	eor	v4.16b, v4.16b, v1.16b		// vpxor	%xmm1,	%xmm4,	%xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),	%xmm1
	eor	v3.16b, v3.16b, v4.16b		// vpxor	%xmm4,	%xmm3,	%xmm3

	b	.Lschedule_mangle_both
.align	4
.Lschedule_mangle_dec:
	// inverse mix columns
						// lea	.Lk_dksd(%rip),%r11
	ushr	v1.16b, v4.16b, #4		// vpsrlb	\$4,	%xmm4,	%xmm1	# 1 = hi
	and	v4.16b, v4.16b, v17.16b		// vpand	%xmm9,	%xmm4,	%xmm4	# 4 = lo

						// vmovdqa	0x00(%r11),	%xmm2
	tbl	v2.16b, {v24.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
						// vmovdqa	0x10(%r11),	%xmm3
	tbl	v3.16b, {v25.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5,	%xmm3,	%xmm3

						// vmovdqa	0x20(%r11),	%xmm2
	tbl	v2.16b, {v26.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3,	%xmm2,	%xmm2
						// vmovdqa	0x30(%r11),	%xmm3
	tbl	v3.16b, {v27.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5,	%xmm3,	%xmm3

						// vmovdqa	0x40(%r11),	%xmm2
	tbl	v2.16b, {v28.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3,	%xmm2,	%xmm2
						// vmovdqa	0x50(%r11),	%xmm3
	tbl	v3.16b, {v29.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3

						// vmovdqa	0x60(%r11),	%xmm2
	tbl	v2.16b, {v30.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5,	%xmm3,	%xmm3
						// vmovdqa	0x70(%r11),	%xmm4
	tbl	v4.16b, {v31.16b}, v1.16b	// vpshufb	%xmm1,	%xmm4,	%xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),	%xmm1
	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3,	%xmm2,	%xmm2
	eor	v3.16b, v4.16b, v2.16b		// vpxor	%xmm2,	%xmm4,	%xmm3

	sub	$out, $out, #16			// add	\$-16,	%rdx

.Lschedule_mangle_both:
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
	add	x8, x8, #48			// add	\$-16,	%r8
	and	x8, x8, #~(1<<6)		// and	\$0x30,	%r8
	st1	{v3.2d}, [$out]			// vmovdqu	%xmm3,	(%rdx)
	ret
.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle

.globl	vpaes_set_encrypt_key
.type	vpaes_set_encrypt_key,%function
.align	4
vpaes_set_encrypt_key:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so

	lsr	w9, $bits, #5		// shr	\$5,%eax
	add	w9, w9, #5		// \$5,%eax
	str	w9, [$out,#240]		// mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;

	mov	$dir, #0		// mov	\$0,%ecx
	mov	x8, #0x30		// mov	\$0x30,%r8d
	bl	_vpaes_schedule_core
	eor	x0, x0, x0

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	vpaes_set_encrypt_key,.-vpaes_set_encrypt_key

.globl	vpaes_set_decrypt_key
.type	vpaes_set_decrypt_key,%function
.align	4
vpaes_set_decrypt_key:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so

	lsr	w9, $bits, #5		// shr	\$5,%eax
	add	w9, w9, #5		// \$5,%eax
	str	w9, [$out,#240]		// mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
	lsl	w9, w9, #4		// shl	\$4,%eax
	add	$out, $out, #16		// lea	16(%rdx,%rax),%rdx
	add	$out, $out, x9

	mov	$dir, #1		// mov	\$1,%ecx
	lsr	w8, $bits, #1		// shr	\$1,%r8d
	and	x8, x8, #32		// and	\$32,%r8d
	eor	x8, x8, #32		// xor	\$32,%r8d	# nbits==192?0:32
	bl	_vpaes_schedule_core

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
___
}
{
my ($inp,$out,$len,$key,$ivec,$dir) = map("x$_",(0..5));

$code.=<<___;
.globl	vpaes_cbc_encrypt
.type	vpaes_cbc_encrypt,%function
.align	4
vpaes_cbc_encrypt:
	AARCH64_SIGN_LINK_REGISTER
	cbz	$len, .Lcbc_abort
	cmp	w5, #0			// check direction
	b.eq	vpaes_cbc_decrypt

	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	mov	x17, $len		// reassign
	mov	x2,  $key		// reassign

	ld1	{v0.16b}, [$ivec]	// load ivec
	bl	_vpaes_encrypt_preheat
	b	.Lcbc_enc_loop

.align	4
.Lcbc_enc_loop:
	ld1	{v7.16b}, [$inp],#16	// load input
	eor	v7.16b, v7.16b, v0.16b	// xor with ivec
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out],#16	// save output
	subs	x17, x17, #16
	b.hi	.Lcbc_enc_loop

	st1	{v0.16b}, [$ivec]	// write ivec

	ldp	x29,x30,[sp],#16
.Lcbc_abort:
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	vpaes_cbc_encrypt,.-vpaes_cbc_encrypt

.type	vpaes_cbc_decrypt,%function
.align	4
vpaes_cbc_decrypt:
	// Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to
	// only from vpaes_cbc_encrypt which has already signed the return address.
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len		// reassign
	mov	x2,  $key		// reassign
	ld1	{v6.16b}, [$ivec]	// load ivec
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lcbc_dec_loop2x

	ld1	{v7.16b}, [$inp], #16	// load input
	bl	_vpaes_decrypt_core
	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
	orr	v6.16b, v7.16b, v7.16b	// next ivec value
	st1	{v0.16b}, [$out], #16
	subs	x17, x17, #16
	b.ls	.Lcbc_dec_done

.align	4
.Lcbc_dec_loop2x:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_decrypt_2x
	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
	eor	v1.16b, v1.16b, v14.16b
	orr	v6.16b, v15.16b, v15.16b
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lcbc_dec_loop2x

.Lcbc_dec_done:
	st1	{v6.16b}, [$ivec]

	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
___
# We omit vpaes_ecb_* in BoringSSL. They are unused.
if (0) {
$code.=<<___;
.globl	vpaes_ecb_encrypt
.type	vpaes_ecb_encrypt,%function
.align	4
vpaes_ecb_encrypt:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len
	mov	x2,  $key
	bl	_vpaes_encrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_enc_loop

	ld1	{v7.16b}, [$inp],#16
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out],#16
	subs	x17, x17, #16
	b.ls	.Lecb_enc_done

.align	4
.Lecb_enc_loop:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_encrypt_2x
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lecb_enc_loop

.Lecb_enc_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	vpaes_ecb_encrypt,.-vpaes_ecb_encrypt

.globl	vpaes_ecb_decrypt
.type	vpaes_ecb_decrypt,%function
.align	4
vpaes_ecb_decrypt:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len
	mov	x2,  $key
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_dec_loop

	ld1	{v7.16b}, [$inp],#16
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [$out],#16
	subs	x17, x17, #16
	b.ls	.Lecb_dec_done

.align	4
.Lecb_dec_loop:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_decrypt_2x
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lecb_dec_loop

.Lecb_dec_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	vpaes_ecb_decrypt,.-vpaes_ecb_decrypt
___
}

my ($ctr, $ctr_tmp) = ("w6", "w7");

# void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
#                                 const AES_KEY *key, const uint8_t ivec[16]);
$code.=<<___;
.globl	vpaes_ctr32_encrypt_blocks
.type	vpaes_ctr32_encrypt_blocks,%function
.align	4
vpaes_ctr32_encrypt_blocks:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	cbz	$len, .Lctr32_done

	// Note, unlike the other functions, $len here is measured in blocks,
	// not bytes.
	mov	x17, $len
	mov	x2,  $key

	// Load the IV and counter portion.
	ldr	$ctr, [$ivec, #12]
	ld1	{v7.16b}, [$ivec]

	bl	_vpaes_encrypt_preheat
	tst	x17, #1
	rev	$ctr, $ctr		// The counter is big-endian.
	b.eq	.Lctr32_prep_loop

	// Handle one block so the remaining block count is even for
	// _vpaes_encrypt_2x.
	ld1	{v6.16b}, [$inp], #16	// Load input ahead of time
	bl	_vpaes_encrypt_core
	eor	v0.16b, v0.16b, v6.16b	// XOR input and result
	st1	{v0.16b}, [$out], #16
	subs	x17, x17, #1
	// Update the counter.
	add	$ctr, $ctr, #1
	rev	$ctr_tmp, $ctr
	mov	v7.s[3], $ctr_tmp
	b.ls	.Lctr32_done

.Lctr32_prep_loop:
	// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
	// uses v14 and v15.
	mov	v15.16b, v7.16b
	mov	v14.16b, v7.16b
	add	$ctr, $ctr, #1
	rev	$ctr_tmp, $ctr
	mov	v15.s[3], $ctr_tmp

.Lctr32_loop:
	ld1	{v6.16b,v7.16b}, [$inp], #32	// Load input ahead of time
	bl	_vpaes_encrypt_2x
	eor	v0.16b, v0.16b, v6.16b		// XOR input and result
	eor	v1.16b, v1.16b, v7.16b		// XOR input and result (#2)
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #2
	// Update the counter.
	add	$ctr_tmp, $ctr, #1
	add	$ctr, $ctr, #2
	rev	$ctr_tmp, $ctr_tmp
	mov	v14.s[3], $ctr_tmp
	rev	$ctr_tmp, $ctr
	mov	v15.s[3], $ctr_tmp
	b.hi	.Lctr32_loop

.Lctr32_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
___
}

print $code;

close STDOUT or die "error closing STDOUT: $!";
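
# A minimal usage sketch (the flavour string and output name below are
# illustrative, not fixed; the build system normally supplies both):
#
#   perl vpaes-armv8.pl linux64 vpaes-armv8.S
#
# The script pipes its output through arm-xlate.pl for the requested
# flavour, and the resulting .S file is assembled as part of the normal
# build.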