git: cb56391c165e - stable/13 - OpenSSL: Merge OpenSSL 1.1.1u
Date: Tue, 30 May 2023 16:52:35 UTC
The branch stable/13 has been updated by jkim:
URL: https://cgit.FreeBSD.org/src/commit/?id=cb56391c165ebdc10a80545f025a2d8f09106ed5
commit cb56391c165ebdc10a80545f025a2d8f09106ed5
Author: Jung-uk Kim <jkim@FreeBSD.org>
AuthorDate: 2023-05-30 14:24:15 +0000
Commit: Jung-uk Kim <jkim@FreeBSD.org>
CommitDate: 2023-05-30 16:51:24 +0000
OpenSSL: Merge OpenSSL 1.1.1u
(cherry picked from commit 8ecb489345f08012fdc92a202a40119891cac330)
(cherry picked from commit 8f1ef87a6b93af292e68f8e33087e2df6325e9bb)
---
crypto/openssl/CHANGES | 58 ++
crypto/openssl/Configure | 2 +-
crypto/openssl/NEWS | 9 +
crypto/openssl/README | 4 +-
crypto/openssl/crypto/aes/asm/bsaes-armv7.pl | 4 +-
crypto/openssl/crypto/asn1/a_bitstr.c | 8 +-
crypto/openssl/crypto/bio/b_print.c | 1 -
crypto/openssl/crypto/bn/bn_asm.c | 108 ++--
crypto/openssl/crypto/bn/bn_blind.c | 17 +-
crypto/openssl/crypto/bn/bn_err.c | 2 -
crypto/openssl/crypto/bn/bn_lib.c | 24 +-
crypto/openssl/crypto/bn/bn_local.h | 40 +-
crypto/openssl/crypto/bn/build.info | 3 +-
crypto/openssl/crypto/bn/rsa_sup_mul.c | 614 ---------------------
crypto/openssl/crypto/conf/conf_sap.c | 7 +-
crypto/openssl/crypto/err/openssl.txt | 3 +-
crypto/openssl/crypto/objects/obj_dat.c | 21 +-
crypto/openssl/crypto/rsa/rsa_ossl.c | 30 +-
crypto/openssl/crypto/ui/ui_lib.c | 6 +-
crypto/openssl/crypto/ui/ui_util.c | 6 +-
crypto/openssl/crypto/x509/x509_vfy.c | 13 +-
crypto/openssl/crypto/x509v3/pcy_local.h | 10 +-
crypto/openssl/crypto/x509v3/pcy_node.c | 26 +-
crypto/openssl/crypto/x509v3/pcy_tree.c | 56 +-
crypto/openssl/doc/fingerprints.txt | 5 +
.../doc/man3/X509_VERIFY_PARAM_set_flags.pod | 11 +-
crypto/openssl/engines/e_padlock.c | 17 +-
crypto/openssl/include/crypto/bn.h | 5 -
crypto/openssl/include/openssl/opensslv.h | 6 +-
29 files changed, 344 insertions(+), 772 deletions(-)
diff --git a/crypto/openssl/CHANGES b/crypto/openssl/CHANGES
index 459605bf71b6..500b0f653e6a 100644
--- a/crypto/openssl/CHANGES
+++ b/crypto/openssl/CHANGES
@@ -7,6 +7,64 @@
https://github.com/openssl/openssl/commits/ and pick the appropriate
release branch.
+ Changes between 1.1.1t and 1.1.1u [30 May 2023]
+
+ *) Mitigate for the time it takes for `OBJ_obj2txt` to translate gigantic
+ OBJECT IDENTIFIER sub-identifiers to canonical numeric text form.
+
+ OBJ_obj2txt() would translate any size OBJECT IDENTIFIER to canonical
+ numeric text form. For gigantic sub-identifiers, this would take a very
+ long time, the time complexity being O(n^2) where n is the size of that
+ sub-identifier. (CVE-2023-2650)
+
+ To mitigate this, `OBJ_obj2txt()` will only translate an OBJECT
+ IDENTIFIER to canonical numeric text form if the size of that OBJECT
+ IDENTIFIER is 586 bytes or less, and fail otherwise.
+
+ The basis for this restriction is RFC 2578 (STD 58), section 3.5. OBJECT
+ IDENTIFIER values, which stipulates that OBJECT IDENTIFIERS may have at
+ most 128 sub-identifiers, and that the maximum value that each sub-
+ identifier may have is 2^32-1 (4294967295 decimal).
+
+ For each byte of every sub-identifier, only the 7 lower bits are part of
+ the value, so the maximum amount of bytes that an OBJECT IDENTIFIER with
+ these restrictions may occupy is 32 * 128 / 7, which is approximately 586
+ bytes.
+
+ Ref: https://datatracker.ietf.org/doc/html/rfc2578#section-3.5
+
+ [Richard Levitte]
+
+ *) Reworked the Fix for the Timing Oracle in RSA Decryption (CVE-2022-4304).
+ The previous fix for this timing side channel turned out to cause
+ a severe 2-3x performance regression in the typical use case
+ compared to 1.1.1s. The new fix uses existing constant time
+ code paths, and restores the previous performance level while
+ fully eliminating all existing timing side channels.
+ The fix was developed by Bernd Edlinger with testing support
+ by Hubert Kario.
+ [Bernd Edlinger]
+
+ *) Corrected documentation of X509_VERIFY_PARAM_add0_policy() to mention
+ that it does not enable policy checking. Thanks to
+ David Benjamin for discovering this issue. (CVE-2023-0466)
+ [Tomas Mraz]
+
+ *) Fixed an issue where invalid certificate policies in leaf certificates are
+ silently ignored by OpenSSL and other certificate policy checks are skipped
+ for that certificate. A malicious CA could use this to deliberately assert
+ invalid certificate policies in order to circumvent policy checking on the
+ certificate altogether. (CVE-2023-0465)
+ [Matt Caswell]
+
+ *) Limited the number of nodes created in a policy tree to mitigate
+ against CVE-2023-0464. The default limit is set to 1000 nodes, which
+ should be sufficient for most installations. If required, the limit
+ can be adjusted by setting the OPENSSL_POLICY_TREE_NODES_MAX build
+ time define to a desired maximum number of nodes or zero to allow
+ unlimited growth. (CVE-2023-0464)
+ [Paul Dale]
+
Changes between 1.1.1s and 1.1.1t [7 Feb 2023]
*) Fixed X.400 address type confusion in X.509 GeneralName.
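As context for the CVE-2023-2650 entry above, here is a minimal C sketch of calling OBJ_obj2txt() in numeric-only mode; with 1.1.1u the conversion now fails for OBJECT IDENTIFIERs whose encoding exceeds roughly 586 bytes, so callers should check the return value. The helper name and buffer size below are illustrative, not taken from this commit.

    #include <stdio.h>
    #include <openssl/objects.h>

    /* Convert an ASN1_OBJECT to dotted-decimal text; return 1 on success. */
    static int print_oid(const ASN1_OBJECT *obj)
    {
        char buf[256];
        int len = OBJ_obj2txt(buf, sizeof(buf), obj, 1 /* numeric form only */);

        if (len <= 0)
            return 0;                   /* conversion failed or was refused */
        if (len >= (int)sizeof(buf))
            return 0;                   /* output would have been truncated */
        printf("OID: %s\n", buf);
        return 1;
    }
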
diff --git a/crypto/openssl/Configure b/crypto/openssl/Configure
index 37a99d14715c..fc9658371a24 100755
--- a/crypto/openssl/Configure
+++ b/crypto/openssl/Configure
@@ -1246,7 +1246,7 @@ if ($target =~ /^mingw/ && `$config{CC} --target-help 2>&1` =~ m/-mno-cygwin/m)
}
if ($target =~ /linux.*-mips/ && !$disabled{asm}
- && !grep { $_ !~ /-m(ips|arch=)/ } (@{$config{CFLAGS}})) {
+ && !grep { $_ =~ /-m(ips|arch=)/ } (@{$config{CFLAGS}})) {
# minimally required architecture flags for assembly modules
my $value;
$value = '-mips2' if ($target =~ /mips32/);
diff --git a/crypto/openssl/NEWS b/crypto/openssl/NEWS
index 2724fc4d85ba..f5a2d5ad33f8 100644
--- a/crypto/openssl/NEWS
+++ b/crypto/openssl/NEWS
@@ -5,6 +5,15 @@
This file gives a brief overview of the major changes between each OpenSSL
release. For more details please read the CHANGES file.
+ Major changes between OpenSSL 1.1.1t and OpenSSL 1.1.1u [30 May 2023]
+
+ o Mitigate for very slow `OBJ_obj2txt()` performance with gigantic
+ OBJECT IDENTIFIER sub-identities. (CVE-2023-2650)
+ o Fixed documentation of X509_VERIFY_PARAM_add0_policy() (CVE-2023-0466)
+ o Fixed handling of invalid certificate policies in leaf certificates
+ (CVE-2023-0465)
+ o Limited the number of nodes created in a policy tree ([CVE-2023-0464])
+
Major changes between OpenSSL 1.1.1s and OpenSSL 1.1.1t [7 Feb 2023]
o Fixed X.400 address type confusion in X.509 GeneralName (CVE-2023-0286)
diff --git a/crypto/openssl/README b/crypto/openssl/README
index b2f806be3a44..b9bf50f4b1e5 100644
--- a/crypto/openssl/README
+++ b/crypto/openssl/README
@@ -1,7 +1,7 @@
- OpenSSL 1.1.1t 7 Feb 2023
+ OpenSSL 1.1.1u 30 May 2023
- Copyright (c) 1998-2022 The OpenSSL Project
+ Copyright (c) 1998-2023 The OpenSSL Project
Copyright (c) 1995-1998 Eric A. Young, Tim J. Hudson
All rights reserved.
diff --git a/crypto/openssl/crypto/aes/asm/bsaes-armv7.pl b/crypto/openssl/crypto/aes/asm/bsaes-armv7.pl
index 2b9f241ce8b0..b22db37d4d4c 100755
--- a/crypto/openssl/crypto/aes/asm/bsaes-armv7.pl
+++ b/crypto/openssl/crypto/aes/asm/bsaes-armv7.pl
@@ -1,5 +1,5 @@
#! /usr/bin/env perl
-# Copyright 2012-2020 The OpenSSL Project Authors. All Rights Reserved.
+# Copyright 2012-2023 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
@@ -14,7 +14,7 @@
# details see http://www.openssl.org/~appro/cryptogams/.
#
# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
-# of Linaro. Permission to use under GPL terms is granted.
+# of Linaro.
# ====================================================================
# Bit-sliced AES for ARM NEON
diff --git a/crypto/openssl/crypto/asn1/a_bitstr.c b/crypto/openssl/crypto/asn1/a_bitstr.c
index f462dd107368..14b57265d172 100644
--- a/crypto/openssl/crypto/asn1/a_bitstr.c
+++ b/crypto/openssl/crypto/asn1/a_bitstr.c
@@ -1,5 +1,5 @@
/*
- * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
@@ -148,6 +148,9 @@ int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *a, int n, int value)
int w, v, iv;
unsigned char *c;
+ if (n < 0)
+ return 0;
+
w = n / 8;
v = 1 << (7 - (n & 0x07));
iv = ~v;
@@ -182,6 +185,9 @@ int ASN1_BIT_STRING_get_bit(const ASN1_BIT_STRING *a, int n)
{
int w, v;
+ if (n < 0)
+ return 0;
+
w = n / 8;
v = 1 << (7 - (n & 0x07));
if ((a == NULL) || (a->length < (w + 1)) || (a->data == NULL))
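The a_bitstr.c hunks above add a guard so that a negative bit index makes ASN1_BIT_STRING_set_bit() and ASN1_BIT_STRING_get_bit() return 0 instead of computing a negative byte offset. A minimal usage sketch (illustrative, not code from the commit):

    #include <openssl/asn1.h>

    static int bitstring_demo(void)
    {
        ASN1_BIT_STRING *bs = ASN1_BIT_STRING_new();
        int bit = 0;

        if (bs == NULL)
            return 0;
        if (ASN1_BIT_STRING_set_bit(bs, 7, 1))      /* set bit 7 */
            bit = ASN1_BIT_STRING_get_bit(bs, 7);   /* reads back 1 */
        /* With this change, a negative index is rejected cleanly: */
        if (ASN1_BIT_STRING_set_bit(bs, -1, 1) != 0)
            bit = -1;                               /* not reached */
        ASN1_BIT_STRING_free(bs);
        return bit;
    }
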
diff --git a/crypto/openssl/crypto/bio/b_print.c b/crypto/openssl/crypto/bio/b_print.c
index 45d4e9f004b1..c4ed364d72f6 100644
--- a/crypto/openssl/crypto/bio/b_print.c
+++ b/crypto/openssl/crypto/bio/b_print.c
@@ -305,7 +305,6 @@ _dopr(char **sbuffer,
case 'G':
case 'g':
/* not implemented for UEFI */
- ERR_raise(ERR_LIB_BIO, ERR_R_UNSUPPORTED);
return 0;
#endif
case 'c':
diff --git a/crypto/openssl/crypto/bn/bn_asm.c b/crypto/openssl/crypto/bn/bn_asm.c
index 4d83a8cf1115..e5fa81b3a1e3 100644
--- a/crypto/openssl/crypto/bn/bn_asm.c
+++ b/crypto/openssl/crypto/bn/bn_asm.c
@@ -1,5 +1,5 @@
/*
- * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
@@ -381,25 +381,33 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
#ifndef OPENSSL_SMALL_FOOTPRINT
while (n & ~3) {
t1 = a[0];
- t2 = b[0];
- r[0] = (t1 - t2 - c) & BN_MASK2;
- if (t1 != t2)
- c = (t1 < t2);
+ t2 = (t1 - c) & BN_MASK2;
+ c = (t2 > t1);
+ t1 = b[0];
+ t1 = (t2 - t1) & BN_MASK2;
+ r[0] = t1;
+ c += (t1 > t2);
t1 = a[1];
- t2 = b[1];
- r[1] = (t1 - t2 - c) & BN_MASK2;
- if (t1 != t2)
- c = (t1 < t2);
+ t2 = (t1 - c) & BN_MASK2;
+ c = (t2 > t1);
+ t1 = b[1];
+ t1 = (t2 - t1) & BN_MASK2;
+ r[1] = t1;
+ c += (t1 > t2);
t1 = a[2];
- t2 = b[2];
- r[2] = (t1 - t2 - c) & BN_MASK2;
- if (t1 != t2)
- c = (t1 < t2);
+ t2 = (t1 - c) & BN_MASK2;
+ c = (t2 > t1);
+ t1 = b[2];
+ t1 = (t2 - t1) & BN_MASK2;
+ r[2] = t1;
+ c += (t1 > t2);
t1 = a[3];
- t2 = b[3];
- r[3] = (t1 - t2 - c) & BN_MASK2;
- if (t1 != t2)
- c = (t1 < t2);
+ t2 = (t1 - c) & BN_MASK2;
+ c = (t2 > t1);
+ t1 = b[3];
+ t1 = (t2 - t1) & BN_MASK2;
+ r[3] = t1;
+ c += (t1 > t2);
a += 4;
b += 4;
r += 4;
@@ -408,10 +416,12 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
#endif
while (n) {
t1 = a[0];
- t2 = b[0];
- r[0] = (t1 - t2 - c) & BN_MASK2;
- if (t1 != t2)
- c = (t1 < t2);
+ t2 = (t1 - c) & BN_MASK2;
+ c = (t2 > t1);
+ t1 = b[0];
+ t1 = (t2 - t1) & BN_MASK2;
+ r[0] = t1;
+ c += (t1 > t2);
a++;
b++;
r++;
@@ -446,7 +456,7 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
t += c0; /* no carry */ \
c0 = (BN_ULONG)Lw(t); \
hi = (BN_ULONG)Hw(t); \
- c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+ c1 = (c1+hi)&BN_MASK2; c2 += (c1<hi); \
} while(0)
# define mul_add_c2(a,b,c0,c1,c2) do { \
@@ -455,11 +465,11 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
BN_ULLONG tt = t+c0; /* no carry */ \
c0 = (BN_ULONG)Lw(tt); \
hi = (BN_ULONG)Hw(tt); \
- c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+ c1 = (c1+hi)&BN_MASK2; c2 += (c1<hi); \
t += c0; /* no carry */ \
c0 = (BN_ULONG)Lw(t); \
hi = (BN_ULONG)Hw(t); \
- c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+ c1 = (c1+hi)&BN_MASK2; c2 += (c1<hi); \
} while(0)
# define sqr_add_c(a,i,c0,c1,c2) do { \
@@ -468,7 +478,7 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
t += c0; /* no carry */ \
c0 = (BN_ULONG)Lw(t); \
hi = (BN_ULONG)Hw(t); \
- c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+ c1 = (c1+hi)&BN_MASK2; c2 += (c1<hi); \
} while(0)
# define sqr_add_c2(a,i,j,c0,c1,c2) \
@@ -483,26 +493,26 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
BN_ULONG ta = (a), tb = (b); \
BN_ULONG lo, hi; \
BN_UMULT_LOHI(lo,hi,ta,tb); \
- c0 += lo; hi += (c0<lo)?1:0; \
- c1 += hi; c2 += (c1<hi)?1:0; \
+ c0 += lo; hi += (c0<lo); \
+ c1 += hi; c2 += (c1<hi); \
} while(0)
# define mul_add_c2(a,b,c0,c1,c2) do { \
BN_ULONG ta = (a), tb = (b); \
BN_ULONG lo, hi, tt; \
BN_UMULT_LOHI(lo,hi,ta,tb); \
- c0 += lo; tt = hi+((c0<lo)?1:0); \
- c1 += tt; c2 += (c1<tt)?1:0; \
- c0 += lo; hi += (c0<lo)?1:0; \
- c1 += hi; c2 += (c1<hi)?1:0; \
+ c0 += lo; tt = hi + (c0<lo); \
+ c1 += tt; c2 += (c1<tt); \
+ c0 += lo; hi += (c0<lo); \
+ c1 += hi; c2 += (c1<hi); \
} while(0)
# define sqr_add_c(a,i,c0,c1,c2) do { \
BN_ULONG ta = (a)[i]; \
BN_ULONG lo, hi; \
BN_UMULT_LOHI(lo,hi,ta,ta); \
- c0 += lo; hi += (c0<lo)?1:0; \
- c1 += hi; c2 += (c1<hi)?1:0; \
+ c0 += lo; hi += (c0<lo); \
+ c1 += hi; c2 += (c1<hi); \
} while(0)
# define sqr_add_c2(a,i,j,c0,c1,c2) \
@@ -517,26 +527,26 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
BN_ULONG ta = (a), tb = (b); \
BN_ULONG lo = ta * tb; \
BN_ULONG hi = BN_UMULT_HIGH(ta,tb); \
- c0 += lo; hi += (c0<lo)?1:0; \
- c1 += hi; c2 += (c1<hi)?1:0; \
+ c0 += lo; hi += (c0<lo); \
+ c1 += hi; c2 += (c1<hi); \
} while(0)
# define mul_add_c2(a,b,c0,c1,c2) do { \
BN_ULONG ta = (a), tb = (b), tt; \
BN_ULONG lo = ta * tb; \
BN_ULONG hi = BN_UMULT_HIGH(ta,tb); \
- c0 += lo; tt = hi + ((c0<lo)?1:0); \
- c1 += tt; c2 += (c1<tt)?1:0; \
- c0 += lo; hi += (c0<lo)?1:0; \
- c1 += hi; c2 += (c1<hi)?1:0; \
+ c0 += lo; tt = hi + (c0<lo); \
+ c1 += tt; c2 += (c1<tt); \
+ c0 += lo; hi += (c0<lo); \
+ c1 += hi; c2 += (c1<hi); \
} while(0)
# define sqr_add_c(a,i,c0,c1,c2) do { \
BN_ULONG ta = (a)[i]; \
BN_ULONG lo = ta * ta; \
BN_ULONG hi = BN_UMULT_HIGH(ta,ta); \
- c0 += lo; hi += (c0<lo)?1:0; \
- c1 += hi; c2 += (c1<hi)?1:0; \
+ c0 += lo; hi += (c0<lo); \
+ c1 += hi; c2 += (c1<hi); \
} while(0)
# define sqr_add_c2(a,i,j,c0,c1,c2) \
@@ -551,8 +561,8 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
BN_ULONG lo = LBITS(a), hi = HBITS(a); \
BN_ULONG bl = LBITS(b), bh = HBITS(b); \
mul64(lo,hi,bl,bh); \
- c0 = (c0+lo)&BN_MASK2; if (c0<lo) hi++; \
- c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+ c0 = (c0+lo)&BN_MASK2; hi += (c0<lo); \
+ c1 = (c1+hi)&BN_MASK2; c2 += (c1<hi); \
} while(0)
# define mul_add_c2(a,b,c0,c1,c2) do { \
@@ -561,17 +571,17 @@ BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
BN_ULONG bl = LBITS(b), bh = HBITS(b); \
mul64(lo,hi,bl,bh); \
tt = hi; \
- c0 = (c0+lo)&BN_MASK2; if (c0<lo) tt++; \
- c1 = (c1+tt)&BN_MASK2; if (c1<tt) c2++; \
- c0 = (c0+lo)&BN_MASK2; if (c0<lo) hi++; \
- c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+ c0 = (c0+lo)&BN_MASK2; tt += (c0<lo); \
+ c1 = (c1+tt)&BN_MASK2; c2 += (c1<tt); \
+ c0 = (c0+lo)&BN_MASK2; hi += (c0<lo); \
+ c1 = (c1+hi)&BN_MASK2; c2 += (c1<hi); \
} while(0)
# define sqr_add_c(a,i,c0,c1,c2) do { \
BN_ULONG lo, hi; \
sqr64(lo,hi,(a)[i]); \
- c0 = (c0+lo)&BN_MASK2; if (c0<lo) hi++; \
- c1 = (c1+hi)&BN_MASK2; if (c1<hi) c2++; \
+ c0 = (c0+lo)&BN_MASK2; hi += (c0<lo); \
+ c1 = (c1+hi)&BN_MASK2; c2 += (c1<hi); \
} while(0)
# define sqr_add_c2(a,i,j,c0,c1,c2) \
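The bn_sub_words() and mul_add_c/sqr_add_c changes above replace data-dependent branches such as "if (t1 != t2) c = (t1 < t2);" with arithmetic on the comparison result, so the carry/borrow update executes the same instructions regardless of the operand values. A standalone illustration of the same borrow-propagation pattern (simplified, not code from the file):

    #include <stdint.h>

    /* r = a - b - c_in (mod 2^64); return the outgoing borrow (0 or 1). */
    static uint64_t sub_borrow(uint64_t *r, uint64_t a, uint64_t b, uint64_t c_in)
    {
        uint64_t t = a - c_in;        /* subtract incoming borrow */
        uint64_t c_out = (t > a);     /* wrapped iff a < c_in */

        *r = t - b;
        c_out += (*r > t);            /* wrapped again iff t < b */
        return c_out;
    }
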
diff --git a/crypto/openssl/crypto/bn/bn_blind.c b/crypto/openssl/crypto/bn/bn_blind.c
index dd5beea7c93e..e76f6107a7b5 100644
--- a/crypto/openssl/crypto/bn/bn_blind.c
+++ b/crypto/openssl/crypto/bn/bn_blind.c
@@ -13,6 +13,20 @@
#define BN_BLINDING_COUNTER 32
+struct bn_blinding_st {
+ BIGNUM *A;
+ BIGNUM *Ai;
+ BIGNUM *e;
+ BIGNUM *mod; /* just a reference */
+ CRYPTO_THREAD_ID tid;
+ int counter;
+ unsigned long flags;
+ BN_MONT_CTX *m_ctx;
+ int (*bn_mod_exp) (BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
+ const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);
+ CRYPTO_RWLOCK *lock;
+};
+
BN_BLINDING *BN_BLINDING_new(const BIGNUM *A, const BIGNUM *Ai, BIGNUM *mod)
{
BN_BLINDING *ret = NULL;
@@ -177,7 +191,8 @@ int BN_BLINDING_invert_ex(BIGNUM *n, const BIGNUM *r, BN_BLINDING *b,
n->top = (int)(rtop & ~mask) | (ntop & mask);
n->flags |= (BN_FLG_FIXED_TOP & ~mask);
}
- ret = BN_mod_mul_montgomery(n, n, r, b->m_ctx, ctx);
+ ret = bn_mul_mont_fixed_top(n, n, r, b->m_ctx, ctx);
+ bn_correct_top_consttime(n);
} else {
ret = BN_mod_mul(n, n, r, b->mod, ctx);
}
diff --git a/crypto/openssl/crypto/bn/bn_err.c b/crypto/openssl/crypto/bn/bn_err.c
index 6f5464b54054..6ea5fc3d5cf6 100644
--- a/crypto/openssl/crypto/bn/bn_err.c
+++ b/crypto/openssl/crypto/bn/bn_err.c
@@ -73,8 +73,6 @@ static const ERR_STRING_DATA BN_str_functs[] = {
{ERR_PACK(ERR_LIB_BN, BN_F_BN_SET_WORDS, 0), "bn_set_words"},
{ERR_PACK(ERR_LIB_BN, BN_F_BN_STACK_PUSH, 0), "BN_STACK_push"},
{ERR_PACK(ERR_LIB_BN, BN_F_BN_USUB, 0), "BN_usub"},
- {ERR_PACK(ERR_LIB_BN, BN_F_OSSL_BN_RSA_DO_UNBLIND, 0),
- "ossl_bn_rsa_do_unblind"},
{0, NULL}
};
diff --git a/crypto/openssl/crypto/bn/bn_lib.c b/crypto/openssl/crypto/bn/bn_lib.c
index eb4a31849bef..cd6aa3448afd 100644
--- a/crypto/openssl/crypto/bn/bn_lib.c
+++ b/crypto/openssl/crypto/bn/bn_lib.c
@@ -1,5 +1,5 @@
/*
- * Copyright 1995-2020 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
@@ -1001,6 +1001,28 @@ BIGNUM *bn_wexpand(BIGNUM *a, int words)
return (words <= a->dmax) ? a : bn_expand2(a, words);
}
+void bn_correct_top_consttime(BIGNUM *a)
+{
+ int j, atop;
+ BN_ULONG limb;
+ unsigned int mask;
+
+ for (j = 0, atop = 0; j < a->dmax; j++) {
+ limb = a->d[j];
+ limb |= 0 - limb;
+ limb >>= BN_BITS2 - 1;
+ limb = 0 - limb;
+ mask = (unsigned int)limb;
+ mask &= constant_time_msb(j - a->top);
+ atop = constant_time_select_int(mask, j + 1, atop);
+ }
+
+ mask = constant_time_eq_int(atop, 0);
+ a->top = atop;
+ a->neg = constant_time_select_int(mask, 0, a->neg);
+ a->flags &= ~BN_FLG_FIXED_TOP;
+}
+
void bn_correct_top(BIGNUM *a)
{
BN_ULONG *ftl;
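The new bn_correct_top_consttime() above recomputes a BIGNUM's word count with masked selection rather than an early-exit loop, so the work done does not depend on the value being processed. The masking idiom it builds on (constant_time_msb() and constant_time_select_int() are OpenSSL-internal helpers) looks roughly like this simplified, illustrative sketch:

    #include <stdint.h>

    /* All-ones mask if bit is 1, all-zeros if bit is 0. */
    static uint32_t ct_mask(uint32_t bit)
    {
        return 0u - (bit & 1u);
    }

    /* Pick a when mask is all-ones, b when mask is all-zeros -- no branch. */
    static uint32_t ct_select(uint32_t mask, uint32_t a, uint32_t b)
    {
        return (mask & a) | (~mask & b);
    }

    /* Example: the larger of x and y, with no data-dependent branch. */
    static uint32_t ct_max(uint32_t x, uint32_t y)
    {
        return ct_select(ct_mask(x > y), x, y);
    }
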
diff --git a/crypto/openssl/crypto/bn/bn_local.h b/crypto/openssl/crypto/bn/bn_local.h
index 30b7614fdbb2..818e34348e1b 100644
--- a/crypto/openssl/crypto/bn/bn_local.h
+++ b/crypto/openssl/crypto/bn/bn_local.h
@@ -283,20 +283,6 @@ struct bn_gencb_st {
} cb;
};
-struct bn_blinding_st {
- BIGNUM *A;
- BIGNUM *Ai;
- BIGNUM *e;
- BIGNUM *mod; /* just a reference */
- CRYPTO_THREAD_ID tid;
- int counter;
- unsigned long flags;
- BN_MONT_CTX *m_ctx;
- int (*bn_mod_exp) (BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
- const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);
- CRYPTO_RWLOCK *lock;
-};
-
/*-
* BN_window_bits_for_exponent_size -- macro for sliding window mod_exp functions
*
@@ -529,10 +515,10 @@ unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
ret = (r); \
BN_UMULT_LOHI(low,high,w,tmp); \
ret += (c); \
- (c) = (ret<(c))?1:0; \
+ (c) = (ret<(c)); \
(c) += high; \
ret += low; \
- (c) += (ret<low)?1:0; \
+ (c) += (ret<low); \
(r) = ret; \
}
@@ -541,7 +527,7 @@ unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
BN_UMULT_LOHI(low,high,w,ta); \
ret = low + (c); \
(c) = high; \
- (c) += (ret<low)?1:0; \
+ (c) += (ret<low); \
(r) = ret; \
}
@@ -557,10 +543,10 @@ unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
high= BN_UMULT_HIGH(w,tmp); \
ret += (c); \
low = (w) * tmp; \
- (c) = (ret<(c))?1:0; \
+ (c) = (ret<(c)); \
(c) += high; \
ret += low; \
- (c) += (ret<low)?1:0; \
+ (c) += (ret<low); \
(r) = ret; \
}
@@ -570,7 +556,7 @@ unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
high= BN_UMULT_HIGH(w,ta); \
ret = low + (c); \
(c) = high; \
- (c) += (ret<low)?1:0; \
+ (c) += (ret<low); \
(r) = ret; \
}
@@ -603,10 +589,10 @@ unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
lt=(bl)*(lt); \
m1=(bl)*(ht); \
ht =(bh)*(ht); \
- m=(m+m1)&BN_MASK2; if (m < m1) ht+=L2HBITS((BN_ULONG)1); \
+ m=(m+m1)&BN_MASK2; ht += L2HBITS((BN_ULONG)(m < m1)); \
ht+=HBITS(m); \
m1=L2HBITS(m); \
- lt=(lt+m1)&BN_MASK2; if (lt < m1) ht++; \
+ lt=(lt+m1)&BN_MASK2; ht += (lt < m1); \
(l)=lt; \
(h)=ht; \
}
@@ -623,7 +609,7 @@ unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
h*=h; \
h+=(m&BN_MASK2h1)>>(BN_BITS4-1); \
m =(m&BN_MASK2l)<<(BN_BITS4+1); \
- l=(l+m)&BN_MASK2; if (l < m) h++; \
+ l=(l+m)&BN_MASK2; h += (l < m); \
(lo)=l; \
(ho)=h; \
}
@@ -637,9 +623,9 @@ unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
mul64(l,h,(bl),(bh)); \
\
/* non-multiply part */ \
- l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
+ l=(l+(c))&BN_MASK2; h += (l < (c)); \
(c)=(r); \
- l=(l+(c))&BN_MASK2; if (l < (c)) h++; \
+ l=(l+(c))&BN_MASK2; h += (l < (c)); \
(c)=h&BN_MASK2; \
(r)=l; \
}
@@ -653,7 +639,7 @@ unsigned __int64 _umul128(unsigned __int64 a, unsigned __int64 b,
mul64(l,h,(bl),(bh)); \
\
/* non-multiply part */ \
- l+=(c); if ((l&BN_MASK2) < (c)) h++; \
+ l+=(c); h += ((l&BN_MASK2) < (c)); \
(c)=h&BN_MASK2; \
(r)=l&BN_MASK2; \
}
@@ -683,7 +669,7 @@ BN_ULONG bn_sub_part_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b,
int cl, int dl);
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
const BN_ULONG *np, const BN_ULONG *n0, int num);
-
+void bn_correct_top_consttime(BIGNUM *a);
BIGNUM *int_bn_mod_inverse(BIGNUM *in,
const BIGNUM *a, const BIGNUM *n, BN_CTX *ctx,
int *noinv);
diff --git a/crypto/openssl/crypto/bn/build.info b/crypto/openssl/crypto/bn/build.info
index c9fe2fdada69..b9ed5322fa68 100644
--- a/crypto/openssl/crypto/bn/build.info
+++ b/crypto/openssl/crypto/bn/build.info
@@ -5,8 +5,7 @@ SOURCE[../../libcrypto]=\
bn_kron.c bn_sqrt.c bn_gcd.c bn_prime.c bn_err.c bn_sqr.c \
{- $target{bn_asm_src} -} \
bn_recp.c bn_mont.c bn_mpi.c bn_exp2.c bn_gf2m.c bn_nist.c \
- bn_depr.c bn_const.c bn_x931p.c bn_intern.c bn_dh.c bn_srp.c \
- rsa_sup_mul.c
+ bn_depr.c bn_const.c bn_x931p.c bn_intern.c bn_dh.c bn_srp.c
INCLUDE[bn_exp.o]=..
diff --git a/crypto/openssl/crypto/bn/rsa_sup_mul.c b/crypto/openssl/crypto/bn/rsa_sup_mul.c
deleted file mode 100644
index acafefd5febf..000000000000
--- a/crypto/openssl/crypto/bn/rsa_sup_mul.c
+++ /dev/null
@@ -1,614 +0,0 @@
-#include <openssl/e_os2.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <string.h>
-#include <openssl/bn.h>
-#include <openssl/err.h>
-#include <openssl/rsaerr.h>
-#include "internal/numbers.h"
-#include "internal/constant_time.h"
-#include "bn_local.h"
-
-# if BN_BYTES == 8
-typedef uint64_t limb_t;
-# if defined(__SIZEOF_INT128__) && __SIZEOF_INT128__ == 16
-/* nonstandard; implemented by gcc on 64-bit platforms */
-typedef __uint128_t limb2_t;
-# define HAVE_LIMB2_T
-# endif
-# define LIMB_BIT_SIZE 64
-# define LIMB_BYTE_SIZE 8
-# elif BN_BYTES == 4
-typedef uint32_t limb_t;
-typedef uint64_t limb2_t;
-# define LIMB_BIT_SIZE 32
-# define LIMB_BYTE_SIZE 4
-# define HAVE_LIMB2_T
-# else
-# error "Not supported"
-# endif
-
-/*
- * For multiplication we're using schoolbook multiplication,
- * so if we have two numbers, each with 6 "digits" (words)
- * the multiplication is calculated as follows:
- * A B C D E F
- * x I J K L M N
- * --------------
- * N*F
- * N*E
- * N*D
- * N*C
- * N*B
- * N*A
- * M*F
- * M*E
- * M*D
- * M*C
- * M*B
- * M*A
- * L*F
- * L*E
- * L*D
- * L*C
- * L*B
- * L*A
- * K*F
- * K*E
- * K*D
- * K*C
- * K*B
- * K*A
- * J*F
- * J*E
- * J*D
- * J*C
- * J*B
- * J*A
- * I*F
- * I*E
- * I*D
- * I*C
- * I*B
- * + I*A
- * ==========================
- * N*B N*D N*F
- * + N*A N*C N*E
- * + M*B M*D M*F
- * + M*A M*C M*E
- * + L*B L*D L*F
- * + L*A L*C L*E
- * + K*B K*D K*F
- * + K*A K*C K*E
- * + J*B J*D J*F
- * + J*A J*C J*E
- * + I*B I*D I*F
- * + I*A I*C I*E
- *
- * 1+1 1+3 1+5
- * 1+0 1+2 1+4
- * 0+1 0+3 0+5
- * 0+0 0+2 0+4
- *
- * 0 1 2 3 4 5 6
- * which requires n^2 multiplications and 2n full length additions
- * as we can keep every other result of limb multiplication in two separate
- * limbs
- */
-
-#if defined HAVE_LIMB2_T
-static ossl_inline void _mul_limb(limb_t *hi, limb_t *lo, limb_t a, limb_t b)
-{
- limb2_t t;
- /*
- * this is idiomatic code to tell compiler to use the native mul
- * those three lines will actually compile to single instruction
- */
-
- t = (limb2_t)a * b;
- *hi = t >> LIMB_BIT_SIZE;
- *lo = (limb_t)t;
-}
-#elif (BN_BYTES == 8) && (defined _MSC_VER)
-/* https://learn.microsoft.com/en-us/cpp/intrinsics/umul128?view=msvc-170 */
-#pragma intrinsic(_umul128)
-static ossl_inline void _mul_limb(limb_t *hi, limb_t *lo, limb_t a, limb_t b)
-{
- *lo = _umul128(a, b, hi);
-}
-#else
-/*
- * if the compiler doesn't have either a 128bit data type nor a "return
- * high 64 bits of multiplication"
- */
-static ossl_inline void _mul_limb(limb_t *hi, limb_t *lo, limb_t a, limb_t b)
-{
- limb_t a_low = (limb_t)(uint32_t)a;
- limb_t a_hi = a >> 32;
- limb_t b_low = (limb_t)(uint32_t)b;
- limb_t b_hi = b >> 32;
-
- limb_t p0 = a_low * b_low;
- limb_t p1 = a_low * b_hi;
- limb_t p2 = a_hi * b_low;
- limb_t p3 = a_hi * b_hi;
-
- uint32_t cy = (uint32_t)(((p0 >> 32) + (uint32_t)p1 + (uint32_t)p2) >> 32);
-
- *lo = p0 + (p1 << 32) + (p2 << 32);
- *hi = p3 + (p1 >> 32) + (p2 >> 32) + cy;
-}
-#endif
-
-/* add two limbs with carry in, return carry out */
-static ossl_inline limb_t _add_limb(limb_t *ret, limb_t a, limb_t b, limb_t carry)
-{
- limb_t carry1, carry2, t;
- /*
- * `c = a + b; if (c < a)` is idiomatic code that makes compilers
- * use add with carry on assembly level
- */
-
- *ret = a + carry;
- if (*ret < a)
- carry1 = 1;
- else
- carry1 = 0;
-
- t = *ret;
- *ret = t + b;
- if (*ret < t)
- carry2 = 1;
- else
- carry2 = 0;
-
- return carry1 + carry2;
-}
-
-/*
- * add two numbers of the same size, return overflow
- *
- * add a to b, place result in ret; all arrays need to be n limbs long
- * return overflow from addition (0 or 1)
- */
-static ossl_inline limb_t add(limb_t *ret, limb_t *a, limb_t *b, size_t n)
-{
- limb_t c = 0;
- ossl_ssize_t i;
-
- for(i = n - 1; i > -1; i--)
- c = _add_limb(&ret[i], a[i], b[i], c);
-
- return c;
-}
-
-/*
- * return number of limbs necessary for temporary values
- * when multiplying numbers n limbs large
- */
-static ossl_inline size_t mul_limb_numb(size_t n)
-{
- return 2 * n * 2;
-}
-
-/*
- * multiply two numbers of the same size
- *
- * multiply a by b, place result in ret; a and b need to be n limbs long
- * ret needs to be 2*n limbs long, tmp needs to be mul_limb_numb(n) limbs
- * long
- */
-static void limb_mul(limb_t *ret, limb_t *a, limb_t *b, size_t n, limb_t *tmp)
-{
- limb_t *r_odd, *r_even;
- size_t i, j, k;
-
- r_odd = tmp;
- r_even = &tmp[2 * n];
-
- memset(ret, 0, 2 * n * sizeof(limb_t));
-
- for (i = 0; i < n; i++) {
- for (k = 0; k < i + n + 1; k++) {
- r_even[k] = 0;
- r_odd[k] = 0;
- }
- for (j = 0; j < n; j++) {
- /*
- * place results from even and odd limbs in separate arrays so that
- * we don't have to calculate overflow every time we get individual
- * limb multiplication result
- */
- if (j % 2 == 0)
- _mul_limb(&r_even[i + j], &r_even[i + j + 1], a[i], b[j]);
- else
- _mul_limb(&r_odd[i + j], &r_odd[i + j + 1], a[i], b[j]);
- }
- /*
- * skip the least significant limbs when adding multiples of
- * more significant limbs (they're zero anyway)
- */
- add(ret, ret, r_even, n + i + 1);
- add(ret, ret, r_odd, n + i + 1);
- }
-}
-
-/* modifies the value in place by performing a right shift by one bit */
-static ossl_inline void rshift1(limb_t *val, size_t n)
-{
- limb_t shift_in = 0, shift_out = 0;
- size_t i;
-
- for (i = 0; i < n; i++) {
- shift_out = val[i] & 1;
- val[i] = shift_in << (LIMB_BIT_SIZE - 1) | (val[i] >> 1);
- shift_in = shift_out;
- }
-}
-
-/* extend the LSB of flag to all bits of limb */
-static ossl_inline limb_t mk_mask(limb_t flag)
-{
- flag |= flag << 1;
- flag |= flag << 2;
- flag |= flag << 4;
- flag |= flag << 8;
- flag |= flag << 16;
-#if (LIMB_BYTE_SIZE == 8)
- flag |= flag << 32;
-#endif
- return flag;
-}
-
-/*
- * copy from either a or b to ret based on flag
- * when flag == 0, then copies from b
- * when flag == 1, then copies from a
- */
-static ossl_inline void cselect(limb_t flag, limb_t *ret, limb_t *a, limb_t *b, size_t n)
-{
- /*
- * would be more efficient with non volatile mask, but then gcc
- * generates code with jumps
- */
- volatile limb_t mask;
- size_t i;
-
- mask = mk_mask(flag);
- for (i = 0; i < n; i++) {
-#if (LIMB_BYTE_SIZE == 8)
- ret[i] = constant_time_select_64(mask, a[i], b[i]);
-#else
- ret[i] = constant_time_select_32(mask, a[i], b[i]);
-#endif
- }
-}
-
-static limb_t _sub_limb(limb_t *ret, limb_t a, limb_t b, limb_t borrow)
-{
- limb_t borrow1, borrow2, t;
- /*
- * while it doesn't look constant-time, this is idiomatic code
- * to tell compilers to use the carry bit from subtraction
- */
-
- *ret = a - borrow;
- if (*ret > a)
- borrow1 = 1;
- else
- borrow1 = 0;
-
- t = *ret;
- *ret = t - b;
- if (*ret > t)
- borrow2 = 1;
- else
- borrow2 = 0;
-
- return borrow1 + borrow2;
-}
-
-/*
- * place the result of a - b into ret, return the borrow bit.
- * All arrays need to be n limbs long
- */
-static limb_t sub(limb_t *ret, limb_t *a, limb_t *b, size_t n)
-{
- limb_t borrow = 0;
- ossl_ssize_t i;
*** 923 LINES SKIPPED ***