author     Natanael Copa <ncopa@alpinelinux.org>  2020-04-21 19:03:24 +0200
committer  Natanael Copa <ncopa@alpinelinux.org>  2020-04-21 19:16:29 +0200
commit     9919f140cf7d3ff305dda398a2a2605489202e60 (patch)
tree       9d244c7ca82d92ee564f78a91192d27f6ff4110a
parent     a7046c08fb7fb4f70653bc4bc48c94c623c1921b (diff)
download   alpine_aports-9919f140cf7d3ff305dda398a2a2605489202e60.tar.bz2
           alpine_aports-9919f140cf7d3ff305dda398a2a2605489202e60.tar.xz
           alpine_aports-9919f140cf7d3ff305dda398a2a2605489202e60.zip
main/openssl: security upgrade to 1.1.1g (CVE-2020-1967)
fixes #11429
-rw-r--r--  main/openssl/APKBUILD             |  12
-rw-r--r--  main/openssl/CVE-2019-1551.patch  | 757
-rw-r--r--  main/openssl/man-section.patch    |   4
3 files changed, 8 insertions, 765 deletions
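Once the rebuilt packages are published, the effect of this commit can be sanity-checked from a shell on any Alpine host. A minimal sketch, assuming the 1.1.1g-r0 build is already available in the repositories listed in /etc/apk/repositories:

    apk update                                  # refresh the package index
    apk upgrade openssl libcrypto1.1 libssl1.1  # pull in the 1.1.1g-r0 packages
    openssl version                             # banner should now report 1.1.1g
    apk info openssl                            # first line shows the package version, e.g. openssl-1.1.1g-r0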
diff --git a/main/openssl/APKBUILD b/main/openssl/APKBUILD
index bcceb4dda2..8ecefb4f55 100644
--- a/main/openssl/APKBUILD
+++ b/main/openssl/APKBUILD
@@ -1,8 +1,8 @@
 # Maintainer: Timo Teras <timo.teras@iki.fi>
 pkgname=openssl
-pkgver=1.1.1d
+pkgver=1.1.1g
 _abiver=${pkgver%.*}
-pkgrel=2
+pkgrel=0
 pkgdesc="Toolkit for Transport Layer Security (TLS)"
 url="https://www.openssl.org"
 arch="all"
@@ -14,7 +14,6 @@ makedepends="$makedepends_host $makedepends_build"
 subpackages="$pkgname-dbg $pkgname-dev $pkgname-doc libcrypto$_abiver:_libcrypto libssl$_abiver:_libssl"
 source="https://www.openssl.org/source/openssl-$pkgver.tar.gz
 	man-section.patch
-	CVE-2019-1551.patch
 	"
 case "$CARCH" in
 s390x) options="$options !check";; # FIXME: test hangs
@@ -23,6 +22,8 @@ esac
 builddir="$srcdir/openssl-$pkgver"
 
 # secfixes:
+#   1.1.1g-r0:
+#     - CVE-2020-1967
 #   1.1.1d-r2:
 #     - CVE-2019-1551
 #   1.1.1d-r0:
@@ -112,6 +113,5 @@ _libssl() {
 	done
 }
 
-sha512sums="2bc9f528c27fe644308eb7603c992bac8740e9f0c3601a130af30c9ffebbf7e0f5c28b76a00bbb478bad40fbe89b4223a58d604001e1713da71ff4b7fe6a08a7  openssl-1.1.1d.tar.gz
-3e5c425d219768721d38bb33db7445eb3ea12d9447a16c5b23b9fddfcbd9d40b98b39506aeac9cbaced4be22ad5a6cb8e4d16fbe4850ac50a6b0c716592b2a2b  man-section.patch
-11ca61515a89766241fe0fae27f3b39767128915f288ea88840bf93e8b50ac416024cb2153efcdf2658d3e82a8e4250a0c069333dbd7347475f9dafcc45370b5  CVE-2019-1551.patch"
+sha512sums="01e3d0b1bceeed8fb066f542ef5480862001556e0f612e017442330bbd7e5faee228b2de3513d7fc347446b7f217e27de1003dc9d7214d5833b97593f3ec25ab  openssl-1.1.1g.tar.gz
+43c3255118db6f5f340dc865c0f25ccbcafe5bf7507585244ca59b4d27daf533d6c3171aa32a8685cbb6200104bec535894b633de13feaadff87ab86739a445a  man-section.patch"
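The APKBUILD changes above follow the usual aports upgrade pattern: bump pkgver, reset pkgrel to 0, drop the backported patch from source=, record the fix under the secfixes: comment, and regenerate sha512sums. A rough sketch of how such a diff is typically produced with abuild (the sed one-liner is illustrative only, not part of this commit):

    cd aports/main/openssl
    sed -i -e 's/^pkgver=.*/pkgver=1.1.1g/' -e 's/^pkgrel=.*/pkgrel=0/' APKBUILD
    rm CVE-2019-1551.patch   # and remove its entry from source= in the APKBUILD
    abuild checksum          # fetch openssl-1.1.1g.tar.gz and rewrite sha512sums=
    abuild -r                # install missing makedepends, build, and run the check() phase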
diff --git a/main/openssl/CVE-2019-1551.patch b/main/openssl/CVE-2019-1551.patch
deleted file mode 100644
index 8daf04ebf9..0000000000
--- a/main/openssl/CVE-2019-1551.patch
+++ /dev/null
@@ -1,757 +0,0 @@
-From 419102400a2811582a7a3d4a4e317d72e5ce0a8f Mon Sep 17 00:00:00 2001
-From: Andy Polyakov <appro@openssl.org>
-Date: Wed, 4 Dec 2019 12:48:21 +0100
-Subject: [PATCH] Fix an overflow bug in rsaz_512_sqr
-
-There is an overflow bug in the x64_64 Montgomery squaring procedure used in
-exponentiation with 512-bit moduli. No EC algorithms are affected. Analysis
-suggests that attacks against 2-prime RSA1024, 3-prime RSA1536, and DSA1024 as a
-result of this defect would be very difficult to perform and are not believed
-likely. Attacks against DH512 are considered just feasible. However, for an
-attack the target would have to re-use the DH512 private key, which is not
-recommended anyway. Also applications directly using the low level API
-BN_mod_exp may be affected if they use BN_FLG_CONSTTIME.
-
-CVE-2019-1551
-
-Reviewed-by: Paul Dale <paul.dale@oracle.com>
-Reviewed-by: Bernd Edlinger <bernd.edlinger@hotmail.de>
-(Merged from https://github.com/openssl/openssl/pull/10575)
----
- crypto/bn/asm/rsaz-x86_64.pl | 381 ++++++++++++++++++-----------------
- 1 file changed, 197 insertions(+), 184 deletions(-)
-
-diff --git a/crypto/bn/asm/rsaz-x86_64.pl b/crypto/bn/asm/rsaz-x86_64.pl
-index b1797b649f..7534d5cd03 100755
---- a/crypto/bn/asm/rsaz-x86_64.pl
-+++ b/crypto/bn/asm/rsaz-x86_64.pl
28@@ -116,7 +116,7 @@ rsaz_512_sqr: # 25-29% faster than rsaz_512_mul
29 subq \$128+24, %rsp
30 .cfi_adjust_cfa_offset 128+24
31 .Lsqr_body:
32- movq $mod, %rbp # common argument
33+ movq $mod, %xmm1 # common off-load
34 movq ($inp), %rdx
35 movq 8($inp), %rax
36 movq $n0, 128(%rsp)
37@@ -134,7 +134,8 @@ $code.=<<___;
38 .Loop_sqr:
39 movl $times,128+8(%rsp)
40 #first iteration
41- movq %rdx, %rbx
42+ movq %rdx, %rbx # 0($inp)
43+ mov %rax, %rbp # 8($inp)
44 mulq %rdx
45 movq %rax, %r8
46 movq 16($inp), %rax
47@@ -173,31 +174,29 @@ $code.=<<___;
48 mulq %rbx
49 addq %rax, %r14
50 movq %rbx, %rax
51- movq %rdx, %r15
52- adcq \$0, %r15
53+ adcq \$0, %rdx
54
55- addq %r8, %r8 #shlq \$1, %r8
56- movq %r9, %rcx
57- adcq %r9, %r9 #shld \$1, %r8, %r9
58+ xorq %rcx,%rcx # rcx:r8 = r8 << 1
59+ addq %r8, %r8
60+ movq %rdx, %r15
61+ adcq \$0, %rcx
62
63 mulq %rax
64- movq %rax, (%rsp)
65- addq %rdx, %r8
66- adcq \$0, %r9
67+ addq %r8, %rdx
68+ adcq \$0, %rcx
69
70- movq %r8, 8(%rsp)
71- shrq \$63, %rcx
72+ movq %rax, (%rsp)
73+ movq %rdx, 8(%rsp)
74
75 #second iteration
76- movq 8($inp), %r8
77 movq 16($inp), %rax
78- mulq %r8
79+ mulq %rbp
80 addq %rax, %r10
81 movq 24($inp), %rax
82 movq %rdx, %rbx
83 adcq \$0, %rbx
84
85- mulq %r8
86+ mulq %rbp
87 addq %rax, %r11
88 movq 32($inp), %rax
89 adcq \$0, %rdx
90@@ -205,7 +204,7 @@ $code.=<<___;
91 movq %rdx, %rbx
92 adcq \$0, %rbx
93
94- mulq %r8
95+ mulq %rbp
96 addq %rax, %r12
97 movq 40($inp), %rax
98 adcq \$0, %rdx
99@@ -213,7 +212,7 @@ $code.=<<___;
100 movq %rdx, %rbx
101 adcq \$0, %rbx
102
103- mulq %r8
104+ mulq %rbp
105 addq %rax, %r13
106 movq 48($inp), %rax
107 adcq \$0, %rdx
108@@ -221,7 +220,7 @@ $code.=<<___;
109 movq %rdx, %rbx
110 adcq \$0, %rbx
111
112- mulq %r8
113+ mulq %rbp
114 addq %rax, %r14
115 movq 56($inp), %rax
116 adcq \$0, %rdx
117@@ -229,39 +228,39 @@ $code.=<<___;
118 movq %rdx, %rbx
119 adcq \$0, %rbx
120
121- mulq %r8
122+ mulq %rbp
123 addq %rax, %r15
124- movq %r8, %rax
125+ movq %rbp, %rax
126 adcq \$0, %rdx
127 addq %rbx, %r15
128- movq %rdx, %r8
129- movq %r10, %rdx
130- adcq \$0, %r8
131+ adcq \$0, %rdx
132
133- add %rdx, %rdx
134- lea (%rcx,%r10,2), %r10 #shld \$1, %rcx, %r10
135- movq %r11, %rbx
136- adcq %r11, %r11 #shld \$1, %r10, %r11
137+ xorq %rbx, %rbx # rbx:r10:r9 = r10:r9 << 1
138+ addq %r9, %r9
139+ movq %rdx, %r8
140+ adcq %r10, %r10
141+ adcq \$0, %rbx
142
143 mulq %rax
144+ addq %rcx, %rax
145+ movq 16($inp), %rbp
146+ adcq \$0, %rdx
147 addq %rax, %r9
148+ movq 24($inp), %rax
149 adcq %rdx, %r10
150- adcq \$0, %r11
151+ adcq \$0, %rbx
152
153 movq %r9, 16(%rsp)
154 movq %r10, 24(%rsp)
155- shrq \$63, %rbx
156
157 #third iteration
158- movq 16($inp), %r9
159- movq 24($inp), %rax
160- mulq %r9
161+ mulq %rbp
162 addq %rax, %r12
163 movq 32($inp), %rax
164 movq %rdx, %rcx
165 adcq \$0, %rcx
166
167- mulq %r9
168+ mulq %rbp
169 addq %rax, %r13
170 movq 40($inp), %rax
171 adcq \$0, %rdx
172@@ -269,7 +268,7 @@ $code.=<<___;
173 movq %rdx, %rcx
174 adcq \$0, %rcx
175
176- mulq %r9
177+ mulq %rbp
178 addq %rax, %r14
179 movq 48($inp), %rax
180 adcq \$0, %rdx
181@@ -277,9 +276,7 @@ $code.=<<___;
182 movq %rdx, %rcx
183 adcq \$0, %rcx
184
185- mulq %r9
186- movq %r12, %r10
187- lea (%rbx,%r12,2), %r12 #shld \$1, %rbx, %r12
188+ mulq %rbp
189 addq %rax, %r15
190 movq 56($inp), %rax
191 adcq \$0, %rdx
192@@ -287,36 +284,40 @@ $code.=<<___;
193 movq %rdx, %rcx
194 adcq \$0, %rcx
195
196- mulq %r9
197- shrq \$63, %r10
198+ mulq %rbp
199 addq %rax, %r8
200- movq %r9, %rax
201+ movq %rbp, %rax
202 adcq \$0, %rdx
203 addq %rcx, %r8
204- movq %rdx, %r9
205- adcq \$0, %r9
206+ adcq \$0, %rdx
207
208- movq %r13, %rcx
209- leaq (%r10,%r13,2), %r13 #shld \$1, %r12, %r13
210+ xorq %rcx, %rcx # rcx:r12:r11 = r12:r11 << 1
211+ addq %r11, %r11
212+ movq %rdx, %r9
213+ adcq %r12, %r12
214+ adcq \$0, %rcx
215
216 mulq %rax
217+ addq %rbx, %rax
218+ movq 24($inp), %r10
219+ adcq \$0, %rdx
220 addq %rax, %r11
221+ movq 32($inp), %rax
222 adcq %rdx, %r12
223- adcq \$0, %r13
224+ adcq \$0, %rcx
225
226 movq %r11, 32(%rsp)
227 movq %r12, 40(%rsp)
228- shrq \$63, %rcx
229
230 #fourth iteration
231- movq 24($inp), %r10
232- movq 32($inp), %rax
233+ mov %rax, %r11 # 32($inp)
234 mulq %r10
235 addq %rax, %r14
236 movq 40($inp), %rax
237 movq %rdx, %rbx
238 adcq \$0, %rbx
239
240+ mov %rax, %r12 # 40($inp)
241 mulq %r10
242 addq %rax, %r15
243 movq 48($inp), %rax
244@@ -325,9 +326,8 @@ $code.=<<___;
245 movq %rdx, %rbx
246 adcq \$0, %rbx
247
248+ mov %rax, %rbp # 48($inp)
249 mulq %r10
250- movq %r14, %r12
251- leaq (%rcx,%r14,2), %r14 #shld \$1, %rcx, %r14
252 addq %rax, %r8
253 movq 56($inp), %rax
254 adcq \$0, %rdx
255@@ -336,32 +336,33 @@ $code.=<<___;
256 adcq \$0, %rbx
257
258 mulq %r10
259- shrq \$63, %r12
260 addq %rax, %r9
261 movq %r10, %rax
262 adcq \$0, %rdx
263 addq %rbx, %r9
264- movq %rdx, %r10
265- adcq \$0, %r10
266+ adcq \$0, %rdx
267
268- movq %r15, %rbx
269- leaq (%r12,%r15,2),%r15 #shld \$1, %r14, %r15
270+ xorq %rbx, %rbx # rbx:r13:r14 = r13:r14 << 1
271+ addq %r13, %r13
272+ movq %rdx, %r10
273+ adcq %r14, %r14
274+ adcq \$0, %rbx
275
276 mulq %rax
277+ addq %rcx, %rax
278+ adcq \$0, %rdx
279 addq %rax, %r13
280+ movq %r12, %rax # 40($inp)
281 adcq %rdx, %r14
282- adcq \$0, %r15
283+ adcq \$0, %rbx
284
285 movq %r13, 48(%rsp)
286 movq %r14, 56(%rsp)
287- shrq \$63, %rbx
288
289 #fifth iteration
290- movq 32($inp), %r11
291- movq 40($inp), %rax
292 mulq %r11
293 addq %rax, %r8
294- movq 48($inp), %rax
295+ movq %rbp, %rax # 48($inp)
296 movq %rdx, %rcx
297 adcq \$0, %rcx
298
299@@ -369,97 +370,99 @@ $code.=<<___;
300 addq %rax, %r9
301 movq 56($inp), %rax
302 adcq \$0, %rdx
303- movq %r8, %r12
304- leaq (%rbx,%r8,2), %r8 #shld \$1, %rbx, %r8
305 addq %rcx, %r9
306 movq %rdx, %rcx
307 adcq \$0, %rcx
308
309+ mov %rax, %r14 # 56($inp)
310 mulq %r11
311- shrq \$63, %r12
312 addq %rax, %r10
313 movq %r11, %rax
314 adcq \$0, %rdx
315 addq %rcx, %r10
316- movq %rdx, %r11
317- adcq \$0, %r11
318+ adcq \$0, %rdx
319
320- movq %r9, %rcx
321- leaq (%r12,%r9,2), %r9 #shld \$1, %r8, %r9
322+ xorq %rcx, %rcx # rcx:r8:r15 = r8:r15 << 1
323+ addq %r15, %r15
324+ movq %rdx, %r11
325+ adcq %r8, %r8
326+ adcq \$0, %rcx
327
328 mulq %rax
329+ addq %rbx, %rax
330+ adcq \$0, %rdx
331 addq %rax, %r15
332+ movq %rbp, %rax # 48($inp)
333 adcq %rdx, %r8
334- adcq \$0, %r9
335+ adcq \$0, %rcx
336
337 movq %r15, 64(%rsp)
338 movq %r8, 72(%rsp)
339- shrq \$63, %rcx
340
341 #sixth iteration
342- movq 40($inp), %r12
343- movq 48($inp), %rax
344 mulq %r12
345 addq %rax, %r10
346- movq 56($inp), %rax
347+ movq %r14, %rax # 56($inp)
348 movq %rdx, %rbx
349 adcq \$0, %rbx
350
351 mulq %r12
352 addq %rax, %r11
353 movq %r12, %rax
354- movq %r10, %r15
355- leaq (%rcx,%r10,2), %r10 #shld \$1, %rcx, %r10
356 adcq \$0, %rdx
357- shrq \$63, %r15
358 addq %rbx, %r11
359- movq %rdx, %r12
360- adcq \$0, %r12
361+ adcq \$0, %rdx
362
363- movq %r11, %rbx
364- leaq (%r15,%r11,2), %r11 #shld \$1, %r10, %r11
365+ xorq %rbx, %rbx # rbx:r10:r9 = r10:r9 << 1
366+ addq %r9, %r9
367+ movq %rdx, %r12
368+ adcq %r10, %r10
369+ adcq \$0, %rbx
370
371 mulq %rax
372+ addq %rcx, %rax
373+ adcq \$0, %rdx
374 addq %rax, %r9
375+ movq %r14, %rax # 56($inp)
376 adcq %rdx, %r10
377- adcq \$0, %r11
378+ adcq \$0, %rbx
379
380 movq %r9, 80(%rsp)
381 movq %r10, 88(%rsp)
382
383 #seventh iteration
384- movq 48($inp), %r13
385- movq 56($inp), %rax
386- mulq %r13
387+ mulq %rbp
388 addq %rax, %r12
389- movq %r13, %rax
390- movq %rdx, %r13
391- adcq \$0, %r13
392+ movq %rbp, %rax
393+ adcq \$0, %rdx
394
395- xorq %r14, %r14
396- shlq \$1, %rbx
397- adcq %r12, %r12 #shld \$1, %rbx, %r12
398- adcq %r13, %r13 #shld \$1, %r12, %r13
399- adcq %r14, %r14 #shld \$1, %r13, %r14
400+ xorq %rcx, %rcx # rcx:r12:r11 = r12:r11 << 1
401+ addq %r11, %r11
402+ movq %rdx, %r13
403+ adcq %r12, %r12
404+ adcq \$0, %rcx
405
406 mulq %rax
407+ addq %rbx, %rax
408+ adcq \$0, %rdx
409 addq %rax, %r11
410+ movq %r14, %rax # 56($inp)
411 adcq %rdx, %r12
412- adcq \$0, %r13
413+ adcq \$0, %rcx
414
415 movq %r11, 96(%rsp)
416 movq %r12, 104(%rsp)
417
418 #eighth iteration
419- movq 56($inp), %rax
420+ xorq %rbx, %rbx # rbx:r13 = r13 << 1
421+ addq %r13, %r13
422+ adcq \$0, %rbx
423+
424 mulq %rax
425- addq %rax, %r13
426+ addq %rcx, %rax
427 adcq \$0, %rdx
428-
429- addq %rdx, %r14
430-
431- movq %r13, 112(%rsp)
432- movq %r14, 120(%rsp)
433+ addq %r13, %rax
434+ adcq %rbx, %rdx
435
436 movq (%rsp), %r8
437 movq 8(%rsp), %r9
438@@ -469,6 +472,10 @@ $code.=<<___;
439 movq 40(%rsp), %r13
440 movq 48(%rsp), %r14
441 movq 56(%rsp), %r15
442+ movq %xmm1, %rbp
443+
444+ movq %rax, 112(%rsp)
445+ movq %rdx, 120(%rsp)
446
447 call __rsaz_512_reduce
448
449@@ -500,9 +507,9 @@ $code.=<<___;
450 .Loop_sqrx:
451 movl $times,128+8(%rsp)
452 movq $out, %xmm0 # off-load
453- movq %rbp, %xmm1 # off-load
454 #first iteration
455 mulx %rax, %r8, %r9
456+ mov %rax, %rbx
457
458 mulx 16($inp), %rcx, %r10
459 xor %rbp, %rbp # cf=0, of=0
460@@ -510,40 +517,39 @@ $code.=<<___;
461 mulx 24($inp), %rax, %r11
462 adcx %rcx, %r9
463
464- mulx 32($inp), %rcx, %r12
465+ .byte 0xc4,0x62,0xf3,0xf6,0xa6,0x20,0x00,0x00,0x00 # mulx 32($inp), %rcx, %r12
466 adcx %rax, %r10
467
468- mulx 40($inp), %rax, %r13
469+ .byte 0xc4,0x62,0xfb,0xf6,0xae,0x28,0x00,0x00,0x00 # mulx 40($inp), %rax, %r13
470 adcx %rcx, %r11
471
472- .byte 0xc4,0x62,0xf3,0xf6,0xb6,0x30,0x00,0x00,0x00 # mulx 48($inp), %rcx, %r14
473+ mulx 48($inp), %rcx, %r14
474 adcx %rax, %r12
475 adcx %rcx, %r13
476
477- .byte 0xc4,0x62,0xfb,0xf6,0xbe,0x38,0x00,0x00,0x00 # mulx 56($inp), %rax, %r15
478+ mulx 56($inp), %rax, %r15
479 adcx %rax, %r14
480 adcx %rbp, %r15 # %rbp is 0
481
482- mov %r9, %rcx
483- shld \$1, %r8, %r9
484- shl \$1, %r8
485-
486- xor %ebp, %ebp
487- mulx %rdx, %rax, %rdx
488- adcx %rdx, %r8
489- mov 8($inp), %rdx
490- adcx %rbp, %r9
491+ mulx %rdx, %rax, $out
492+ mov %rbx, %rdx # 8($inp)
493+ xor %rcx, %rcx
494+ adox %r8, %r8
495+ adcx $out, %r8
496+ adox %rbp, %rcx
497+ adcx %rbp, %rcx
498
499 mov %rax, (%rsp)
500 mov %r8, 8(%rsp)
501
502 #second iteration
503- mulx 16($inp), %rax, %rbx
504+ .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x10,0x00,0x00,0x00 # mulx 16($inp), %rax, %rbx
505 adox %rax, %r10
506 adcx %rbx, %r11
507
508- .byte 0xc4,0x62,0xc3,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 24($inp), $out, %r8
509+ mulx 24($inp), $out, %r8
510 adox $out, %r11
511+ .byte 0x66
512 adcx %r8, %r12
513
514 mulx 32($inp), %rax, %rbx
515@@ -561,24 +567,25 @@ $code.=<<___;
516 .byte 0xc4,0x62,0xc3,0xf6,0x86,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r8
517 adox $out, %r15
518 adcx %rbp, %r8
519+ mulx %rdx, %rax, $out
520 adox %rbp, %r8
521+ .byte 0x48,0x8b,0x96,0x10,0x00,0x00,0x00 # mov 16($inp), %rdx
522
523- mov %r11, %rbx
524- shld \$1, %r10, %r11
525- shld \$1, %rcx, %r10
526-
527- xor %ebp,%ebp
528- mulx %rdx, %rax, %rcx
529- mov 16($inp), %rdx
530+ xor %rbx, %rbx
531+ adcx %rcx, %rax
532+ adox %r9, %r9
533+ adcx %rbp, $out
534+ adox %r10, %r10
535 adcx %rax, %r9
536- adcx %rcx, %r10
537- adcx %rbp, %r11
538+ adox %rbp, %rbx
539+ adcx $out, %r10
540+ adcx %rbp, %rbx
541
542 mov %r9, 16(%rsp)
543 .byte 0x4c,0x89,0x94,0x24,0x18,0x00,0x00,0x00 # mov %r10, 24(%rsp)
544
545 #third iteration
546- .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x18,0x00,0x00,0x00 # mulx 24($inp), $out, %r9
547+ mulx 24($inp), $out, %r9
548 adox $out, %r12
549 adcx %r9, %r13
550
551@@ -586,7 +593,7 @@ $code.=<<___;
552 adox %rax, %r13
553 adcx %rcx, %r14
554
555- mulx 40($inp), $out, %r9
556+ .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x28,0x00,0x00,0x00 # mulx 40($inp), $out, %r9
557 adox $out, %r14
558 adcx %r9, %r15
559
560@@ -594,27 +601,28 @@ $code.=<<___;
561 adox %rax, %r15
562 adcx %rcx, %r8
563
564- .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r9
565+ mulx 56($inp), $out, %r9
566 adox $out, %r8
567 adcx %rbp, %r9
568+ mulx %rdx, %rax, $out
569 adox %rbp, %r9
570+ mov 24($inp), %rdx
571
572- mov %r13, %rcx
573- shld \$1, %r12, %r13
574- shld \$1, %rbx, %r12
575-
576- xor %ebp, %ebp
577- mulx %rdx, %rax, %rdx
578+ xor %rcx, %rcx
579+ adcx %rbx, %rax
580+ adox %r11, %r11
581+ adcx %rbp, $out
582+ adox %r12, %r12
583 adcx %rax, %r11
584- adcx %rdx, %r12
585- mov 24($inp), %rdx
586- adcx %rbp, %r13
587+ adox %rbp, %rcx
588+ adcx $out, %r12
589+ adcx %rbp, %rcx
590
591 mov %r11, 32(%rsp)
592- .byte 0x4c,0x89,0xa4,0x24,0x28,0x00,0x00,0x00 # mov %r12, 40(%rsp)
593+ mov %r12, 40(%rsp)
594
595 #fourth iteration
596- .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x20,0x00,0x00,0x00 # mulx 32($inp), %rax, %rbx
597+ mulx 32($inp), %rax, %rbx
598 adox %rax, %r14
599 adcx %rbx, %r15
600
601@@ -629,25 +637,25 @@ $code.=<<___;
602 mulx 56($inp), $out, %r10
603 adox $out, %r9
604 adcx %rbp, %r10
605+ mulx %rdx, %rax, $out
606 adox %rbp, %r10
607+ mov 32($inp), %rdx
608
609- .byte 0x66
610- mov %r15, %rbx
611- shld \$1, %r14, %r15
612- shld \$1, %rcx, %r14
613-
614- xor %ebp, %ebp
615- mulx %rdx, %rax, %rdx
616+ xor %rbx, %rbx
617+ adcx %rcx, %rax
618+ adox %r13, %r13
619+ adcx %rbp, $out
620+ adox %r14, %r14
621 adcx %rax, %r13
622- adcx %rdx, %r14
623- mov 32($inp), %rdx
624- adcx %rbp, %r15
625+ adox %rbp, %rbx
626+ adcx $out, %r14
627+ adcx %rbp, %rbx
628
629 mov %r13, 48(%rsp)
630 mov %r14, 56(%rsp)
631
632 #fifth iteration
633- .byte 0xc4,0x62,0xc3,0xf6,0x9e,0x28,0x00,0x00,0x00 # mulx 40($inp), $out, %r11
634+ mulx 40($inp), $out, %r11
635 adox $out, %r8
636 adcx %r11, %r9
637
638@@ -658,18 +666,19 @@ $code.=<<___;
639 mulx 56($inp), $out, %r11
640 adox $out, %r10
641 adcx %rbp, %r11
642+ mulx %rdx, %rax, $out
643+ mov 40($inp), %rdx
644 adox %rbp, %r11
645
646- mov %r9, %rcx
647- shld \$1, %r8, %r9
648- shld \$1, %rbx, %r8
649-
650- xor %ebp, %ebp
651- mulx %rdx, %rax, %rdx
652+ xor %rcx, %rcx
653+ adcx %rbx, %rax
654+ adox %r15, %r15
655+ adcx %rbp, $out
656+ adox %r8, %r8
657 adcx %rax, %r15
658- adcx %rdx, %r8
659- mov 40($inp), %rdx
660- adcx %rbp, %r9
661+ adox %rbp, %rcx
662+ adcx $out, %r8
663+ adcx %rbp, %rcx
664
665 mov %r15, 64(%rsp)
666 mov %r8, 72(%rsp)
667@@ -682,18 +691,19 @@ $code.=<<___;
668 .byte 0xc4,0x62,0xc3,0xf6,0xa6,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r12
669 adox $out, %r11
670 adcx %rbp, %r12
671+ mulx %rdx, %rax, $out
672 adox %rbp, %r12
673+ mov 48($inp), %rdx
674
675- mov %r11, %rbx
676- shld \$1, %r10, %r11
677- shld \$1, %rcx, %r10
678-
679- xor %ebp, %ebp
680- mulx %rdx, %rax, %rdx
681+ xor %rbx, %rbx
682+ adcx %rcx, %rax
683+ adox %r9, %r9
684+ adcx %rbp, $out
685+ adox %r10, %r10
686 adcx %rax, %r9
687- adcx %rdx, %r10
688- mov 48($inp), %rdx
689- adcx %rbp, %r11
690+ adcx $out, %r10
691+ adox %rbp, %rbx
692+ adcx %rbp, %rbx
693
694 mov %r9, 80(%rsp)
695 mov %r10, 88(%rsp)
696@@ -703,31 +713,31 @@ $code.=<<___;
697 adox %rax, %r12
698 adox %rbp, %r13
699
700- xor %r14, %r14
701- shld \$1, %r13, %r14
702- shld \$1, %r12, %r13
703- shld \$1, %rbx, %r12
704-
705- xor %ebp, %ebp
706- mulx %rdx, %rax, %rdx
707- adcx %rax, %r11
708- adcx %rdx, %r12
709+ mulx %rdx, %rax, $out
710+ xor %rcx, %rcx
711 mov 56($inp), %rdx
712- adcx %rbp, %r13
713+ adcx %rbx, %rax
714+ adox %r11, %r11
715+ adcx %rbp, $out
716+ adox %r12, %r12
717+ adcx %rax, %r11
718+ adox %rbp, %rcx
719+ adcx $out, %r12
720+ adcx %rbp, %rcx
721
722 .byte 0x4c,0x89,0x9c,0x24,0x60,0x00,0x00,0x00 # mov %r11, 96(%rsp)
723 .byte 0x4c,0x89,0xa4,0x24,0x68,0x00,0x00,0x00 # mov %r12, 104(%rsp)
724
725 #eighth iteration
726 mulx %rdx, %rax, %rdx
727- adox %rax, %r13
728- adox %rbp, %rdx
729+ xor %rbx, %rbx
730+ adcx %rcx, %rax
731+ adox %r13, %r13
732+ adcx %rbp, %rdx
733+ adox %rbp, %rbx
734+ adcx %r13, %rax
735+ adcx %rdx, %rbx
736
737- .byte 0x66
738- add %rdx, %r14
739-
740- movq %r13, 112(%rsp)
741- movq %r14, 120(%rsp)
742 movq %xmm0, $out
743 movq %xmm1, %rbp
744
745@@ -741,6 +751,9 @@ $code.=<<___;
746 movq 48(%rsp), %r14
747 movq 56(%rsp), %r15
748
749+ movq %rax, 112(%rsp)
750+ movq %rbx, 120(%rsp)
751+
752 call __rsaz_512_reducex
753
754 addq 64(%rsp), %r8
755--
7562.17.1
757
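The 757-line backport deleted above is no longer needed: the fix for CVE-2019-1551 (merged upstream via openssl/openssl pull request 10575) shipped upstream in 1.1.1e and is therefore already part of the 1.1.1g tarball. A quick way to double-check that before dropping such a patch, assuming the new tarball has already been fetched (for example with abuild fetch):

    # look for the CVE in the upstream change log; a hit means the local backport is redundant
    tar -xzOf openssl-1.1.1g.tar.gz openssl-1.1.1g/CHANGES | grep -n -A1 'CVE-2019-1551'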
diff --git a/main/openssl/man-section.patch b/main/openssl/man-section.patch
index 2920145612..0606897f45 100644
--- a/main/openssl/man-section.patch
+++ b/main/openssl/man-section.patch
@@ -25,8 +25,8 @@ index 1292053546f5..c034d21884d8 100644
  	@[ -n "$(INSTALLTOP)" ] || (echo INSTALLTOP should not be empty; exit 1)
  	@$(ECHO) "*** Installing manpages"
  	$(PERL) $(SRCDIR)/util/process_docs.pl \
--		--destdir=$(DESTDIR)$(MANDIR) --type=man --suffix=$(MANSUFFIX)
-+		--destdir=$(DESTDIR)$(MANDIR) --type=man --suffix=$(MANSUFFIX) \
+-		"--destdir=$(DESTDIR)$(MANDIR)" --type=man --suffix=$(MANSUFFIX)
++		"--destdir=$(DESTDIR)$(MANDIR)" --type=man --suffix=$(MANSUFFIX) \
 +		--mansection=$(MANSECTION)
 
  uninstall_man_docs: