@ aesv8-armx32.S — AES with ARMv8 Cryptography Extensions, 32-bit ARM build.
  1. // This file is generated from a similarly-named Perl script in the BoringSSL
  2. // source tree. Do not edit by hand.
  3. #if !defined(__has_feature)
  4. #define __has_feature(x) 0
  5. #endif
  6. #if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
  7. #define OPENSSL_NO_ASM
  8. #endif
  9. #if !defined(OPENSSL_NO_ASM)
  10. #if defined(BORINGSSL_PREFIX)
  11. #include <boringssl_prefix_symbols_asm.h>
  12. #endif
  13. #include <openssl/arm_arch.h>
  14. #if __ARM_MAX_ARCH__>=7
  15. .text
  16. .code 32
  17. #undef __thumb2__
  18. .align 5
  19. Lrcon:
  20. .long 0x01,0x01,0x01,0x01
  21. .long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat
  22. .long 0x1b,0x1b,0x1b,0x1b
  23. .text
  24. .globl _aes_hw_set_encrypt_key
  25. .private_extern _aes_hw_set_encrypt_key
  26. #ifdef __thumb2__
  27. .thumb_func _aes_hw_set_encrypt_key
  28. #endif
  29. .align 5
  30. _aes_hw_set_encrypt_key:
  31. Lenc_key:
  32. mov r3,#-1
  33. cmp r0,#0
  34. beq Lenc_key_abort
  35. cmp r2,#0
  36. beq Lenc_key_abort
  37. mov r3,#-2
  38. cmp r1,#128
  39. blt Lenc_key_abort
  40. cmp r1,#256
  41. bgt Lenc_key_abort
  42. tst r1,#0x3f
  43. bne Lenc_key_abort
  44. adr r3,Lrcon
  45. cmp r1,#192
  46. veor q0,q0,q0
  47. vld1.8 {q3},[r0]!
  48. mov r1,#8 @ reuse r1
  49. vld1.32 {q1,q2},[r3]!
  50. blt Loop128
  51. beq L192
  52. b L256
  53. .align 4
  54. Loop128:
  55. vtbl.8 d20,{q3},d4
  56. vtbl.8 d21,{q3},d5
  57. vext.8 q9,q0,q3,#12
  58. vst1.32 {q3},[r2]!
  59. .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
  60. subs r1,r1,#1
  61. veor q3,q3,q9
  62. vext.8 q9,q0,q9,#12
  63. veor q3,q3,q9
  64. vext.8 q9,q0,q9,#12
  65. veor q10,q10,q1
  66. veor q3,q3,q9
  67. vshl.u8 q1,q1,#1
  68. veor q3,q3,q10
  69. bne Loop128
  70. vld1.32 {q1},[r3]
  71. vtbl.8 d20,{q3},d4
  72. vtbl.8 d21,{q3},d5
  73. vext.8 q9,q0,q3,#12
  74. vst1.32 {q3},[r2]!
  75. .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
  76. veor q3,q3,q9
  77. vext.8 q9,q0,q9,#12
  78. veor q3,q3,q9
  79. vext.8 q9,q0,q9,#12
  80. veor q10,q10,q1
  81. veor q3,q3,q9
  82. vshl.u8 q1,q1,#1
  83. veor q3,q3,q10
  84. vtbl.8 d20,{q3},d4
  85. vtbl.8 d21,{q3},d5
  86. vext.8 q9,q0,q3,#12
  87. vst1.32 {q3},[r2]!
  88. .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
  89. veor q3,q3,q9
  90. vext.8 q9,q0,q9,#12
  91. veor q3,q3,q9
  92. vext.8 q9,q0,q9,#12
  93. veor q10,q10,q1
  94. veor q3,q3,q9
  95. veor q3,q3,q10
  96. vst1.32 {q3},[r2]
  97. add r2,r2,#0x50
  98. mov r12,#10
  99. b Ldone
  100. .align 4
  101. L192:
  102. vld1.8 {d16},[r0]!
  103. vmov.i8 q10,#8 @ borrow q10
  104. vst1.32 {q3},[r2]!
  105. vsub.i8 q2,q2,q10 @ adjust the mask
  106. Loop192:
  107. vtbl.8 d20,{q8},d4
  108. vtbl.8 d21,{q8},d5
  109. vext.8 q9,q0,q3,#12
  110. vst1.32 {d16},[r2]!
  111. .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
  112. subs r1,r1,#1
  113. veor q3,q3,q9
  114. vext.8 q9,q0,q9,#12
  115. veor q3,q3,q9
  116. vext.8 q9,q0,q9,#12
  117. veor q3,q3,q9
  118. vdup.32 q9,d7[1]
  119. veor q9,q9,q8
  120. veor q10,q10,q1
  121. vext.8 q8,q0,q8,#12
  122. vshl.u8 q1,q1,#1
  123. veor q8,q8,q9
  124. veor q3,q3,q10
  125. veor q8,q8,q10
  126. vst1.32 {q3},[r2]!
  127. bne Loop192
  128. mov r12,#12
  129. add r2,r2,#0x20
  130. b Ldone
  131. .align 4
  132. L256:
  133. vld1.8 {q8},[r0]
  134. mov r1,#7
  135. mov r12,#14
  136. vst1.32 {q3},[r2]!
  137. Loop256:
  138. vtbl.8 d20,{q8},d4
  139. vtbl.8 d21,{q8},d5
  140. vext.8 q9,q0,q3,#12
  141. vst1.32 {q8},[r2]!
  142. .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
  143. subs r1,r1,#1
  144. veor q3,q3,q9
  145. vext.8 q9,q0,q9,#12
  146. veor q3,q3,q9
  147. vext.8 q9,q0,q9,#12
  148. veor q10,q10,q1
  149. veor q3,q3,q9
  150. vshl.u8 q1,q1,#1
  151. veor q3,q3,q10
  152. vst1.32 {q3},[r2]!
  153. beq Ldone
  154. vdup.32 q10,d7[1]
  155. vext.8 q9,q0,q8,#12
  156. .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
  157. veor q8,q8,q9
  158. vext.8 q9,q0,q9,#12
  159. veor q8,q8,q9
  160. vext.8 q9,q0,q9,#12
  161. veor q8,q8,q9
  162. veor q8,q8,q10
  163. b Loop256
  164. Ldone:
  165. str r12,[r2]
  166. mov r3,#0
  167. Lenc_key_abort:
  168. mov r0,r3 @ return value
  169. bx lr
  170. .globl _aes_hw_set_decrypt_key
  171. .private_extern _aes_hw_set_decrypt_key
  172. #ifdef __thumb2__
  173. .thumb_func _aes_hw_set_decrypt_key
  174. #endif
  175. .align 5
  176. _aes_hw_set_decrypt_key:
  177. stmdb sp!,{r4,lr}
  178. bl Lenc_key
  179. cmp r0,#0
  180. bne Ldec_key_abort
  181. sub r2,r2,#240 @ restore original r2
  182. mov r4,#-16
  183. add r0,r2,r12,lsl#4 @ end of key schedule
  184. vld1.32 {q0},[r2]
  185. vld1.32 {q1},[r0]
  186. vst1.32 {q0},[r0],r4
  187. vst1.32 {q1},[r2]!
  188. Loop_imc:
  189. vld1.32 {q0},[r2]
  190. vld1.32 {q1},[r0]
  191. .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
  192. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  193. vst1.32 {q0},[r0],r4
  194. vst1.32 {q1},[r2]!
  195. cmp r0,r2
  196. bhi Loop_imc
  197. vld1.32 {q0},[r2]
  198. .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
  199. vst1.32 {q0},[r0]
  200. eor r0,r0,r0 @ return value
  201. Ldec_key_abort:
  202. ldmia sp!,{r4,pc}
  203. .globl _aes_hw_encrypt
  204. .private_extern _aes_hw_encrypt
  205. #ifdef __thumb2__
  206. .thumb_func _aes_hw_encrypt
  207. #endif
  208. .align 5
  209. _aes_hw_encrypt:
  210. AARCH64_VALID_CALL_TARGET
  211. ldr r3,[r2,#240]
  212. vld1.32 {q0},[r2]!
  213. vld1.8 {q2},[r0]
  214. sub r3,r3,#2
  215. vld1.32 {q1},[r2]!
  216. Loop_enc:
  217. .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
  218. .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
  219. vld1.32 {q0},[r2]!
  220. subs r3,r3,#2
  221. .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
  222. .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
  223. vld1.32 {q1},[r2]!
  224. bgt Loop_enc
  225. .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
  226. .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
  227. vld1.32 {q0},[r2]
  228. .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
  229. veor q2,q2,q0
  230. vst1.8 {q2},[r1]
  231. bx lr
  232. .globl _aes_hw_decrypt
  233. .private_extern _aes_hw_decrypt
  234. #ifdef __thumb2__
  235. .thumb_func _aes_hw_decrypt
  236. #endif
  237. .align 5
  238. _aes_hw_decrypt:
  239. AARCH64_VALID_CALL_TARGET
  240. ldr r3,[r2,#240]
  241. vld1.32 {q0},[r2]!
  242. vld1.8 {q2},[r0]
  243. sub r3,r3,#2
  244. vld1.32 {q1},[r2]!
  245. Loop_dec:
  246. .byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
  247. .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
  248. vld1.32 {q0},[r2]!
  249. subs r3,r3,#2
  250. .byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
  251. .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
  252. vld1.32 {q1},[r2]!
  253. bgt Loop_dec
  254. .byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
  255. .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
  256. vld1.32 {q0},[r2]
  257. .byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
  258. veor q2,q2,q0
  259. vst1.8 {q2},[r1]
  260. bx lr
  261. .globl _aes_hw_cbc_encrypt
  262. .private_extern _aes_hw_cbc_encrypt
  263. #ifdef __thumb2__
  264. .thumb_func _aes_hw_cbc_encrypt
  265. #endif
  266. .align 5
  267. _aes_hw_cbc_encrypt:
  268. mov ip,sp
  269. stmdb sp!,{r4,r5,r6,r7,r8,lr}
  270. vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
  271. ldmia ip,{r4,r5} @ load remaining args
  272. subs r2,r2,#16
  273. mov r8,#16
  274. blo Lcbc_abort
  275. moveq r8,#0
  276. cmp r5,#0 @ en- or decrypting?
  277. ldr r5,[r3,#240]
  278. and r2,r2,#-16
  279. vld1.8 {q6},[r4]
  280. vld1.8 {q0},[r0],r8
  281. vld1.32 {q8,q9},[r3] @ load key schedule...
  282. sub r5,r5,#6
  283. add r7,r3,r5,lsl#4 @ pointer to last 7 round keys
  284. sub r5,r5,#2
  285. vld1.32 {q10,q11},[r7]!
  286. vld1.32 {q12,q13},[r7]!
  287. vld1.32 {q14,q15},[r7]!
  288. vld1.32 {q7},[r7]
  289. add r7,r3,#32
  290. mov r6,r5
  291. beq Lcbc_dec
  292. cmp r5,#2
  293. veor q0,q0,q6
  294. veor q5,q8,q7
  295. beq Lcbc_enc128
  296. vld1.32 {q2,q3},[r7]
  297. add r7,r3,#16
  298. add r6,r3,#16*4
  299. add r12,r3,#16*5
  300. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  301. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  302. add r14,r3,#16*6
  303. add r3,r3,#16*7
  304. b Lenter_cbc_enc
  305. .align 4
  306. Loop_cbc_enc:
  307. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  308. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  309. vst1.8 {q6},[r1]!
  310. Lenter_cbc_enc:
  311. .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
  312. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  313. .byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
  314. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  315. vld1.32 {q8},[r6]
  316. cmp r5,#4
  317. .byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
  318. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  319. vld1.32 {q9},[r12]
  320. beq Lcbc_enc192
  321. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  322. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  323. vld1.32 {q8},[r14]
  324. .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
  325. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  326. vld1.32 {q9},[r3]
  327. nop
  328. Lcbc_enc192:
  329. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  330. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  331. subs r2,r2,#16
  332. .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
  333. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  334. moveq r8,#0
  335. .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
  336. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  337. .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
  338. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  339. vld1.8 {q8},[r0],r8
  340. .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
  341. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  342. veor q8,q8,q5
  343. .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
  344. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  345. vld1.32 {q9},[r7] @ re-pre-load rndkey[1]
  346. .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
  347. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  348. .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
  349. veor q6,q0,q7
  350. bhs Loop_cbc_enc
  351. vst1.8 {q6},[r1]!
  352. b Lcbc_done
  353. .align 5
  354. Lcbc_enc128:
  355. vld1.32 {q2,q3},[r7]
  356. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  357. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  358. b Lenter_cbc_enc128
  359. Loop_cbc_enc128:
  360. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  361. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  362. vst1.8 {q6},[r1]!
  363. Lenter_cbc_enc128:
  364. .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
  365. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  366. subs r2,r2,#16
  367. .byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
  368. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  369. moveq r8,#0
  370. .byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
  371. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  372. .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
  373. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  374. .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
  375. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  376. vld1.8 {q8},[r0],r8
  377. .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
  378. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  379. .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
  380. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  381. .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
  382. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  383. veor q8,q8,q5
  384. .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
  385. veor q6,q0,q7
  386. bhs Loop_cbc_enc128
  387. vst1.8 {q6},[r1]!
  388. b Lcbc_done
  389. .align 5
  390. Lcbc_dec:
  391. vld1.8 {q10},[r0]!
  392. subs r2,r2,#32 @ bias
  393. add r6,r5,#2
  394. vorr q3,q0,q0
  395. vorr q1,q0,q0
  396. vorr q11,q10,q10
  397. blo Lcbc_dec_tail
  398. vorr q1,q10,q10
  399. vld1.8 {q10},[r0]!
  400. vorr q2,q0,q0
  401. vorr q3,q1,q1
  402. vorr q11,q10,q10
  403. Loop3x_cbc_dec:
  404. .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
  405. .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
  406. .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
  407. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  408. .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
  409. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  410. vld1.32 {q8},[r7]!
  411. subs r6,r6,#2
  412. .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
  413. .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
  414. .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
  415. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  416. .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
  417. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  418. vld1.32 {q9},[r7]!
  419. bgt Loop3x_cbc_dec
  420. .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
  421. .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
  422. .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
  423. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  424. .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
  425. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  426. veor q4,q6,q7
  427. subs r2,r2,#0x30
  428. veor q5,q2,q7
  429. movlo r6,r2 @ r6, r6, is zero at this point
  430. .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
  431. .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
  432. .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
  433. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  434. .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
  435. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  436. veor q9,q3,q7
  437. add r0,r0,r6 @ r0 is adjusted in such way that
  438. @ at exit from the loop q1-q10
  439. @ are loaded with last "words"
  440. vorr q6,q11,q11
  441. mov r7,r3
  442. .byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
  443. .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
  444. .byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
  445. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  446. .byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
  447. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  448. vld1.8 {q2},[r0]!
  449. .byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
  450. .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
  451. .byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
  452. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  453. .byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
  454. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  455. vld1.8 {q3},[r0]!
  456. .byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
  457. .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
  458. .byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
  459. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  460. .byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
  461. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  462. vld1.8 {q11},[r0]!
  463. .byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
  464. .byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
  465. .byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
  466. vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
  467. add r6,r5,#2
  468. veor q4,q4,q0
  469. veor q5,q5,q1
  470. veor q10,q10,q9
  471. vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
  472. vst1.8 {q4},[r1]!
  473. vorr q0,q2,q2
  474. vst1.8 {q5},[r1]!
  475. vorr q1,q3,q3
  476. vst1.8 {q10},[r1]!
  477. vorr q10,q11,q11
  478. bhs Loop3x_cbc_dec
  479. cmn r2,#0x30
  480. beq Lcbc_done
  481. nop
  482. Lcbc_dec_tail:
  483. .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
  484. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  485. .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
  486. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  487. vld1.32 {q8},[r7]!
  488. subs r6,r6,#2
  489. .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
  490. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  491. .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
  492. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  493. vld1.32 {q9},[r7]!
  494. bgt Lcbc_dec_tail
  495. .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
  496. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  497. .byte 0x60,0x43,0xf0,0xf3 @ aesd q10,q8
  498. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  499. .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
  500. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  501. .byte 0x62,0x43,0xf0,0xf3 @ aesd q10,q9
  502. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  503. .byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
  504. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  505. .byte 0x68,0x43,0xf0,0xf3 @ aesd q10,q12
  506. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  507. cmn r2,#0x20
  508. .byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
  509. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  510. .byte 0x6a,0x43,0xf0,0xf3 @ aesd q10,q13
  511. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  512. veor q5,q6,q7
  513. .byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
  514. .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
  515. .byte 0x6c,0x43,0xf0,0xf3 @ aesd q10,q14
  516. .byte 0xe4,0x43,0xf0,0xf3 @ aesimc q10,q10
  517. veor q9,q3,q7
  518. .byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
  519. .byte 0x6e,0x43,0xf0,0xf3 @ aesd q10,q15
  520. beq Lcbc_dec_one
  521. veor q5,q5,q1
  522. veor q9,q9,q10
  523. vorr q6,q11,q11
  524. vst1.8 {q5},[r1]!
  525. vst1.8 {q9},[r1]!
  526. b Lcbc_done
  527. Lcbc_dec_one:
  528. veor q5,q5,q10
  529. vorr q6,q11,q11
  530. vst1.8 {q5},[r1]!
  531. Lcbc_done:
  532. vst1.8 {q6},[r4]
  533. Lcbc_abort:
  534. vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
  535. ldmia sp!,{r4,r5,r6,r7,r8,pc}
  536. .globl _aes_hw_ctr32_encrypt_blocks
  537. .private_extern _aes_hw_ctr32_encrypt_blocks
  538. #ifdef __thumb2__
  539. .thumb_func _aes_hw_ctr32_encrypt_blocks
  540. #endif
  541. .align 5
  542. _aes_hw_ctr32_encrypt_blocks:
  543. mov ip,sp
  544. stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,lr}
  545. vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ ABI specification says so
  546. ldr r4, [ip] @ load remaining arg
  547. ldr r5,[r3,#240]
  548. ldr r8, [r4, #12]
  549. vld1.32 {q0},[r4]
  550. vld1.32 {q8,q9},[r3] @ load key schedule...
  551. sub r5,r5,#4
  552. mov r12,#16
  553. cmp r2,#2
  554. add r7,r3,r5,lsl#4 @ pointer to last 5 round keys
  555. sub r5,r5,#2
  556. vld1.32 {q12,q13},[r7]!
  557. vld1.32 {q14,q15},[r7]!
  558. vld1.32 {q7},[r7]
  559. add r7,r3,#32
  560. mov r6,r5
  561. movlo r12,#0
  562. @ ARM Cortex-A57 and Cortex-A72 cores running in 32-bit mode are
  563. @ affected by silicon errata #1742098 [0] and #1655431 [1],
  564. @ respectively, where the second instruction of an aese/aesmc
  565. @ instruction pair may execute twice if an interrupt is taken right
  566. @ after the first instruction consumes an input register of which a
  567. @ single 32-bit lane has been updated the last time it was modified.
  568. @
  569. @ This function uses a counter in one 32-bit lane. The
  570. @ could write to q1 and q10 directly, but that trips this bugs.
  571. @ We write to q6 and copy to the final register as a workaround.
  572. @
  573. @ [0] ARM-EPM-049219 v23 Cortex-A57 MPCore Software Developers Errata Notice
  574. @ [1] ARM-EPM-012079 v11.0 Cortex-A72 MPCore Software Developers Errata Notice
  575. #ifndef __ARMEB__
  576. rev r8, r8
  577. #endif
  578. add r10, r8, #1
  579. vorr q6,q0,q0
  580. rev r10, r10
  581. vmov.32 d13[1],r10
  582. add r8, r8, #2
  583. vorr q1,q6,q6
  584. bls Lctr32_tail
  585. rev r12, r8
  586. vmov.32 d13[1],r12
  587. sub r2,r2,#3 @ bias
  588. vorr q10,q6,q6
  589. b Loop3x_ctr32
  590. .align 4
  591. Loop3x_ctr32:
  592. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  593. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  594. .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
  595. .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
  596. .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
  597. .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
  598. vld1.32 {q8},[r7]!
  599. subs r6,r6,#2
  600. .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
  601. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  602. .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
  603. .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
  604. .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
  605. .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
  606. vld1.32 {q9},[r7]!
  607. bgt Loop3x_ctr32
  608. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  609. .byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0
  610. .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
  611. .byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
  612. vld1.8 {q2},[r0]!
  613. add r9,r8,#1
  614. .byte 0x20,0x43,0xf0,0xf3 @ aese q10,q8
  615. .byte 0xa4,0x43,0xf0,0xf3 @ aesmc q10,q10
  616. vld1.8 {q3},[r0]!
  617. rev r9,r9
  618. .byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
  619. .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
  620. .byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
  621. .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
  622. vld1.8 {q11},[r0]!
  623. mov r7,r3
  624. .byte 0x22,0x43,0xf0,0xf3 @ aese q10,q9
  625. .byte 0xa4,0x23,0xf0,0xf3 @ aesmc q9,q10
  626. .byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
  627. .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
  628. .byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
  629. .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
  630. veor q2,q2,q7
  631. add r10,r8,#2
  632. .byte 0x28,0x23,0xf0,0xf3 @ aese q9,q12
  633. .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
  634. veor q3,q3,q7
  635. add r8,r8,#3
  636. .byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13
  637. .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
  638. .byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
  639. .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
  640. @ Note the logic to update q0, q1, and q1 is written to work
  641. @ around a bug in ARM Cortex-A57 and Cortex-A72 cores running in
  642. @ 32-bit mode. See the comment above.
  643. veor q11,q11,q7
  644. vmov.32 d13[1], r9
  645. .byte 0x2a,0x23,0xf0,0xf3 @ aese q9,q13
  646. .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
  647. vorr q0,q6,q6
  648. rev r10,r10
  649. .byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
  650. .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
  651. vmov.32 d13[1], r10
  652. rev r12,r8
  653. .byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
  654. .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
  655. vorr q1,q6,q6
  656. vmov.32 d13[1], r12
  657. .byte 0x2c,0x23,0xf0,0xf3 @ aese q9,q14
  658. .byte 0xa2,0x23,0xf0,0xf3 @ aesmc q9,q9
  659. vorr q10,q6,q6
  660. subs r2,r2,#3
  661. .byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
  662. .byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
  663. .byte 0x2e,0x23,0xf0,0xf3 @ aese q9,q15
  664. veor q2,q2,q4
  665. vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
  666. vst1.8 {q2},[r1]!
  667. veor q3,q3,q5
  668. mov r6,r5
  669. vst1.8 {q3},[r1]!
  670. veor q11,q11,q9
  671. vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
  672. vst1.8 {q11},[r1]!
  673. bhs Loop3x_ctr32
  674. adds r2,r2,#3
  675. beq Lctr32_done
  676. cmp r2,#1
  677. mov r12,#16
  678. moveq r12,#0
  679. Lctr32_tail:
  680. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  681. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  682. .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
  683. .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
  684. vld1.32 {q8},[r7]!
  685. subs r6,r6,#2
  686. .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
  687. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  688. .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
  689. .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
  690. vld1.32 {q9},[r7]!
  691. bgt Lctr32_tail
  692. .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
  693. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  694. .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
  695. .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
  696. .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
  697. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  698. .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
  699. .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
  700. vld1.8 {q2},[r0],r12
  701. .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
  702. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  703. .byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12
  704. .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
  705. vld1.8 {q3},[r0]
  706. .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
  707. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  708. .byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13
  709. .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
  710. veor q2,q2,q7
  711. .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
  712. .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
  713. .byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14
  714. .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
  715. veor q3,q3,q7
  716. .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
  717. .byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15
  718. cmp r2,#1
  719. veor q2,q2,q0
  720. veor q3,q3,q1
  721. vst1.8 {q2},[r1]!
  722. beq Lctr32_done
  723. vst1.8 {q3},[r1]
  724. Lctr32_done:
  725. vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15}
  726. ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
  727. #endif
  728. #endif // !OPENSSL_NO_ASM