// ghashv8-armx64.S — GHASH for ARMv8 Crypto Extensions (PMULL), AArch64.
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.
#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
// MemorySanitizer cannot observe stores performed by hand-written assembly,
// so under MSan the assembly implementations are disabled and the C fallbacks
// are used instead.
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif
#if !defined(OPENSSL_NO_ASM)
#if defined(__aarch64__)
#if defined(BORINGSSL_PREFIX)
#include <boringssl_prefix_symbols_asm.h>
#endif
#include <openssl/arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
// Enable the Cryptographic Extension instructions (pmull/pmull2 below).
.arch armv8-a+crypto
//-----------------------------------------------------------------------
// void gcm_init_v8(u128 Htable[], const uint64_t H[2])
//
// Precompute the GHASH key schedule used by gcm_gmult_v8/gcm_ghash_v8.
// In:  x0 = Htable (output), x1 = raw hash key H (two 64-bit words)
// Out: per the st1 stores below, Htable[0..5] receive: twisted H,
//      packed Karatsuba pre-processed halves, H^2, H^3, packed halves,
//      H^4.  "Twisted" refers to the Xi-rotation convention used by the
//      other entry points in this file.
// Clobbers: v0-v7, v16-v22 (all caller-saved under AAPCS64); no GPRs
//      other than the post-incremented x0; no stack.
//-----------------------------------------------------------------------
.globl gcm_init_v8
.hidden gcm_init_v8
.type gcm_init_v8,%function
.align 4
gcm_init_v8:
AARCH64_VALID_CALL_TARGET // BTI landing pad (macro from <openssl/arm_arch.h>)
ld1 {v17.2d},[x1] //load input H
movi v19.16b,#0xe1
shl v19.2d,v19.2d,#57 //0xc2.0 — GHASH reduction constant
ext v3.16b,v17.16b,v17.16b,#8
ushr v18.2d,v19.2d,#63
dup v17.4s,v17.s[1]
ext v16.16b,v18.16b,v19.16b,#8 //t0=0xc2....01
ushr v18.2d,v3.2d,#63
sshr v17.4s,v17.4s,#31 //broadcast carry bit
and v18.16b,v18.16b,v16.16b
shl v3.2d,v3.2d,#1
ext v18.16b,v18.16b,v18.16b,#8
and v16.16b,v16.16b,v17.16b
orr v3.16b,v3.16b,v18.16b //H<<<=1
eor v20.16b,v3.16b,v16.16b //twisted H
st1 {v20.2d},[x0],#16 //store Htable[0]
//calculate H^2 = H * H via 3 pmulls (Karatsuba) + 2-phase reduction
ext v16.16b,v20.16b,v20.16b,#8 //Karatsuba pre-processing
pmull v0.1q,v20.1d,v20.1d
eor v16.16b,v16.16b,v20.16b
pmull2 v2.1q,v20.2d,v20.2d
pmull v1.1q,v16.1d,v16.1d
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v22.16b,v0.16b,v18.16b //twisted H^2
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4 (H*H^2 and H^2*H^2), two products interleaved
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.size gcm_init_v8,.-gcm_init_v8
//-----------------------------------------------------------------------
// void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[])
//
// One GHASH multiplication: Xi = Xi * H, using the twisted H and the
// packed Karatsuba halves precomputed by gcm_init_v8 (Htable[0..1]).
// In:  x0 = Xi (read and written in place), x1 = Htable
// Clobbers: v0-v3, v17-v21 (all caller-saved under AAPCS64); no stack.
//-----------------------------------------------------------------------
.globl gcm_gmult_v8
.hidden gcm_gmult_v8
.type gcm_gmult_v8,%function
.align 4
gcm_gmult_v8:
AARCH64_VALID_CALL_TARGET // BTI landing pad (macro from <openssl/arm_arch.h>)
ld1 {v17.2d},[x0] //load Xi
movi v19.16b,#0xe1
ld1 {v20.2d,v21.2d},[x1] //load twisted H, ...
shl v19.2d,v19.2d,#57 //compose 0xc2.0 reduction constant
#ifndef __ARMEB__
rev64 v17.16b,v17.16b //byte-swap Xi on little-endian
#endif
ext v3.16b,v17.16b,v17.16b,#8
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
#ifndef __ARMEB__
rev64 v0.16b,v0.16b //undo the byte swap before storing
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.size gcm_gmult_v8,.-gcm_gmult_v8
//-----------------------------------------------------------------------
// void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[],
//                   const uint8_t *inp, size_t len)
//
// GHASH over a buffer: folds len bytes of input into Xi.
// In:  x0 = Xi (read/written in place), x1 = Htable (from gcm_init_v8),
//      x2 = inp, x3 = len — assumed a multiple of 16 (the code only
//      tests 32-byte strides; TODO confirm contract against callers).
// len >= 64 takes the 4x-wide path below; otherwise a modulo-scheduled
// 2-blocks-per-iteration loop (H and H^2) with a 1-block odd tail.
// Clobbers: x12, v0-v7, v16-v22 on this path (all caller-saved);
//      no stack.  v8-v15 (callee-saved low halves) are never touched.
//-----------------------------------------------------------------------
.globl gcm_ghash_v8
.hidden gcm_ghash_v8
.type gcm_ghash_v8,%function
.align 4
gcm_ghash_v8:
AARCH64_VALID_CALL_TARGET // BTI landing pad (macro from <openssl/arm_arch.h>)
cmp x3,#64
b.hs .Lgcm_ghash_v8_4x //>=64 bytes: use the 4x-aggregated path
ld1 {v0.2d},[x0] //load [rotated] Xi
//"[rotated]" means that
//loaded value would have
//to be rotated in order to
//make it appear as in
//algorithm specification
subs x3,x3,#32 //see if x3 is 32 or larger
mov x12,#16 //x12 is used as post-
//increment for input pointer;
//as loop is modulo-scheduled
//x12 is zeroed just in time
//to preclude overstepping
//inp[len], which means that
//last block[s] are actually
//loaded twice, but last
//copy is not processed
ld1 {v20.2d,v21.2d},[x1],#32 //load twisted H, ..., H^2
movi v19.16b,#0xe1
ld1 {v22.2d},[x1]
csel x12,xzr,x12,eq //is it time to zero x12?
ext v0.16b,v0.16b,v0.16b,#8 //rotate Xi
ld1 {v16.2d},[x2],#16 //load [rotated] I[0]
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
#ifndef __ARMEB__
rev64 v16.16b,v16.16b
rev64 v0.16b,v0.16b
#endif
ext v3.16b,v16.16b,v16.16b,#8 //rotate I[0]
b.lo .Lodd_tail_v8 //x3 was less than 32
//prologue of the modulo-scheduled loop: start H·I[1] early
ld1 {v17.2d},[x2],x12 //load [rotated] I[1]
#ifndef __ARMEB__
rev64 v17.16b,v17.16b
#endif
ext v7.16b,v17.16b,v17.16b,#8
eor v3.16b,v3.16b,v0.16b //I[i]^=Xi
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
pmull2 v6.1q,v20.2d,v7.2d
b .Loop_mod2x_v8
.align 4
//steady state: two blocks per iteration — (Xi+I[i])·H^2 + I[i+1]·H —
//with the next pair's H-multiply software-pipelined into this one
.Loop_mod2x_v8:
ext v18.16b,v3.16b,v3.16b,#8
subs x3,x3,#32 //is there more data?
pmull v0.1q,v22.1d,v3.1d //H^2.lo·Xi.lo
csel x12,xzr,x12,lo //is it time to zero x12?
pmull v5.1q,v21.1d,v17.1d
eor v18.16b,v18.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v22.2d,v3.2d //H^2.hi·Xi.hi
eor v0.16b,v0.16b,v4.16b //accumulate
pmull2 v1.1q,v21.2d,v18.2d //(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
ld1 {v16.2d},[x2],x12 //load [rotated] I[i+2]
eor v2.16b,v2.16b,v6.16b
csel x12,xzr,x12,eq //is it time to zero x12?
eor v1.16b,v1.16b,v5.16b
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v17.2d},[x2],x12 //load [rotated] I[i+3]
#ifndef __ARMEB__
rev64 v16.16b,v16.16b
#endif
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
#ifndef __ARMEB__
rev64 v17.16b,v17.16b
#endif
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v7.16b,v17.16b,v17.16b,#8
ext v3.16b,v16.16b,v16.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v4.1q,v20.1d,v7.1d //H·Ii+1
eor v3.16b,v3.16b,v2.16b //accumulate v3.16b early
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v3.16b,v3.16b,v18.16b
eor v17.16b,v17.16b,v7.16b //Karatsuba pre-processing
eor v3.16b,v3.16b,v0.16b
pmull2 v6.1q,v20.2d,v7.2d
b.hs .Loop_mod2x_v8 //there was at least 32 more bytes
//epilogue: undo the early accumulation done for the pipelined iteration
eor v2.16b,v2.16b,v18.16b
ext v3.16b,v16.16b,v16.16b,#8 //re-construct v3.16b
adds x3,x3,#32 //re-construct x3
eor v0.16b,v0.16b,v2.16b //re-construct v0.16b
b.eq .Ldone_v8 //is x3 zero?
//single trailing block: Xi = (Xi+I[last])·H
.Lodd_tail_v8:
ext v18.16b,v0.16b,v0.16b,#8
eor v3.16b,v3.16b,v0.16b //inp^=Xi
eor v17.16b,v16.16b,v18.16b //v17.16b is rotated inp^Xi
pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
.Ldone_v8:
#ifndef __ARMEB__
rev64 v0.16b,v0.16b //undo the byte swap before storing
#endif
ext v0.16b,v0.16b,v0.16b,#8
st1 {v0.2d},[x0] //write out Xi
ret
.size gcm_ghash_v8,.-gcm_ghash_v8
//-----------------------------------------------------------------------
// gcm_ghash_v8_4x — 4x-aggregated GHASH inner loop.
//
// Local (non-.globl) helper reached only via the direct branch
// `b.hs .Lgcm_ghash_v8_4x` in gcm_ghash_v8 above (same register
// contract: x0=Xi, x1=Htable, x2=inp, x3=len, here len >= 64), so no
// AARCH64_VALID_CALL_TARGET landing pad is needed.
// Processes 64 bytes per iteration by aggregating four blocks:
//   Xi = (((Xi+I0)·H^4) + I1·H^3 + I2·H^2 + I3·H)
// with a single reduction per iteration; tails of 48/32/16 bytes are
// handled by .Lthree/.Ltwo/.Lone.
// Clobbers: v0-v7, v16-v31 (all caller-saved; v8-v15 untouched).
//-----------------------------------------------------------------------
.type gcm_ghash_v8_4x,%function
.align 4
gcm_ghash_v8_4x:
.Lgcm_ghash_v8_4x:
ld1 {v0.2d},[x0] //load [rotated] Xi
ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2
movi v19.16b,#0xe1
ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
#ifndef __ARMEB__
rev64 v0.16b,v0.16b
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
//prologue: multiply I1..I3 by H^3..H while the loop handles (Xi+I0)·H^4
ext v25.16b,v7.16b,v7.16b,#8
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
pmull2 v31.1q,v20.2d,v25.2d
pmull v30.1q,v21.1d,v7.1d
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#128
b.lo .Ltail4x
b .Loop4x
.align 4
//steady state: reduce the previous aggregate while multiplying the
//next four input blocks, accumulated in v29/v30/v31
.Loop4x:
eor v16.16b,v4.16b,v0.16b
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
ext v3.16b,v16.16b,v16.16b,#8
#ifndef __ARMEB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
ext v25.16b,v7.16b,v7.16b,#8
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
ext v24.16b,v6.16b,v6.16b,#8
eor v1.16b,v1.16b,v30.16b
ext v23.16b,v5.16b,v5.16b,#8
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
eor v1.16b,v1.16b,v17.16b
pmull2 v31.1q,v20.2d,v25.2d
eor v1.16b,v1.16b,v18.16b
pmull v30.1q,v21.1d,v7.1d
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
eor v0.16b,v1.16b,v18.16b
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v18.16b,v18.16b,v2.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v0.16b,v0.16b,v18.16b
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#64
b.hs .Loop4x
//drain the pipeline: fold Xi into the last loaded I0 and finish the
//aggregate; then dispatch on the remaining 0/16/32/48 bytes
.Ltail4x:
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
adds x3,x3,#64
b.eq .Ldone4x
cmp x3,#32
b.lo .Lone
b.eq .Ltwo
//48-byte tail: reduce, then aggregate three blocks with H^3..H
.Lthree:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d,v6.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __ARMEB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v29.1q,v20.1d,v24.1d //H·Ii+2
eor v6.16b,v6.16b,v24.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
pmull2 v31.1q,v20.2d,v24.2d
pmull v30.1q,v21.1d,v6.1d
eor v0.16b,v0.16b,v18.16b
pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1
eor v5.16b,v5.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull2 v23.1q,v22.2d,v23.2d
eor v16.16b,v4.16b,v0.16b
pmull2 v5.1q,v21.2d,v5.2d
ext v3.16b,v16.16b,v16.16b,#8
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v26.2d,v3.2d
pmull v1.1q,v27.1d,v16.1d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b .Ldone4x
.align 4
//32-byte tail: reduce, then aggregate two blocks with H^2 and H
.Ltwo:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __ARMEB__
rev64 v5.16b,v5.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull v29.1q,v20.1d,v23.1d //H·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull2 v31.1q,v20.2d,v23.2d
pmull v30.1q,v21.1d,v5.1d
pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v22.2d,v3.2d
pmull2 v1.1q,v21.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b .Ldone4x
.align 4
//16-byte tail: reduce, then one final (Xi+Ii)·H multiply
.Lone:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __ARMEB__
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v20.1d,v3.1d
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v20.2d,v3.2d
pmull v1.1q,v21.1d,v16.1d
//final Karatsuba fold + reduction, then store Xi back
.Ldone4x:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
#ifndef __ARMEB__
rev64 v0.16b,v0.16b //undo the byte swap before storing
#endif
st1 {v0.2d},[x0] //write out Xi
ret
.size gcm_ghash_v8_4x,.-gcm_ghash_v8_4x
// ASCII identification string:
// "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>\0"
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif
#endif // !OPENSSL_NO_ASM
// Mark the stack as non-executable for GNU toolchains.
.section .note.GNU-stack,"",%progbits