diff -u libgcrypt11-1.4.4/debian/changelog libgcrypt11-1.4.4/debian/changelog
--- libgcrypt11-1.4.4/debian/changelog
+++ libgcrypt11-1.4.4/debian/changelog
@@ -1,3 +1,44 @@
+libgcrypt11 (1.4.4-5ubuntu2.4) lucid-security; urgency=medium
+
+  * SECURITY UPDATE: sidechannel attack on Elgamal
+    - debian/patches/24-CVE-2014-3591.diff: use ciphertext blinding in
+      cipher/elgamal.c.
+    - CVE-2014-3591
+  * SECURITY UPDATE: sidechannel attack via timing variations in mpi_powm
+    - debian/patches/25-CVE-2015-0837.diff: avoid timing variations in
+      mpi/mpi-pow.c, mpi/mpiutil.c, src/mpi.h.
+    - CVE-2015-0837
+
+ -- Marc Deslauriers  Thu, 26 Mar 2015 08:55:36 -0400
+
+libgcrypt11 (1.4.4-5ubuntu2.3) lucid-security; urgency=medium
+
+  * SECURITY UPDATE: side-channel attack on Elgamal encryption subkeys
+    - debian/patches/22-add_gcry_divide_by_zero.diff: replace deliberate
+      division by zero with new _gcry_divide_by_zero().
+    - debian/patches/23-CVE-2014-5270.diff: use sliding window method for
+      exponentiation algorithm in mpi/mpi-pow.c.
+    - CVE-2014-5270
+
+ -- Marc Deslauriers  Tue, 19 Aug 2014 09:12:39 -0400
+
+libgcrypt11 (1.4.4-5ubuntu2.2) lucid-security; urgency=low
+
+  * SECURITY UPDATE: The path of execution in an exponentiation function may
+    depend upon secret key data, allowing a local attacker to determine the
+    contents of the secret key through a side-channel attack.
+    - debian/patches/21-CVE-2013-4242.diff: always perform the mpi_mul for
+      exponents in secure memory. Based on upstream patch.
+    - CVE-2013-4242
+
+ -- Seth Arnold  Tue, 30 Jul 2013 18:05:18 -0700
+
+libgcrypt11 (1.4.4-5ubuntu2.1) lucid-proposed; urgency=low
+
+  * Do not call global_init when setting thread callbacks (LP: #423252)
+
+ -- Adam Stokes  Thu, 24 May 2012 16:31:52 -0400
+
 libgcrypt11 (1.4.4-5ubuntu2) lucid; urgency=low
 
   * Adjust install/libgcrypt11-dev target to cope with move to /lib.
only in patch2:
unchanged:
--- libgcrypt11-1.4.4.orig/debian/patches/24-CVE-2014-3591.diff
+++ libgcrypt11-1.4.4/debian/patches/24-CVE-2014-3591.diff
@@ -0,0 +1,80 @@
+From 35cd81f134c0da4e7e6fcfe40d270ee1251f52c2 Mon Sep 17 00:00:00 2001
+From: Werner Koch
+Date: Mon, 23 Feb 2015 11:39:58 +0100
+Subject: [PATCH] cipher: Use ciphertext blinding for Elgamal decryption.
+
+diff -Nur -x '*.orig' -x '*~' libgcrypt11-1.4.4/cipher/elgamal.c libgcrypt11-1.4.4.new/cipher/elgamal.c
+--- libgcrypt11-1.4.4/cipher/elgamal.c 2008-11-24 13:51:42.000000000 -0500
++++ libgcrypt11-1.4.4.new/cipher/elgamal.c 2015-03-26 08:53:48.168420903 -0400
+@@ -30,6 +30,12 @@
+ #include "mpi.h"
+ #include "cipher.h"
+ 
++/* Blinding is used to mitigate side-channel attacks.  You may undef
++   this to speed up the operation in case the system is secured
++   against physical and network mounted side-channel attacks.  */
++#define USE_BLINDING 1
++
++
+ typedef struct
+ {
+   gcry_mpi_t p;    /* prime */
+@@ -486,12 +492,45 @@
+ static void
+ decrypt(gcry_mpi_t output, gcry_mpi_t a, gcry_mpi_t b, ELG_secret_key *skey )
+ {
+-  gcry_mpi_t t1 = mpi_alloc_secure( mpi_get_nlimbs( skey->p ) );
++  gcry_mpi_t t1, t2, r;
++  unsigned int nbits = mpi_get_nbits (skey->p);
++
++  mpi_normalize (a);
++  mpi_normalize (b);
++
++  t1 = mpi_snew (nbits);
++
++#ifdef USE_BLINDING
++
++  t2 = mpi_snew (nbits);
++  r = mpi_new (nbits);
++
++  /* We need a random number of about the prime size. The random
++     number merely needs to be unpredictable; thus we use level 0.  */
++  _gcry_mpi_randomize (r, nbits, GCRY_WEAK_RANDOM);
++
++  /* t1 = r^x mod p */
++  mpi_powm (t1, r, skey->x, skey->p);
++  /* t2 = (a * r)^-x mod p */
++  mpi_mulm (t2, a, r, skey->p);
++  mpi_powm (t2, t2, skey->x, skey->p);
++  mpi_invm (t2, t2, skey->p);
++  /* t1 = (t1 * t2) mod p*/
++  mpi_mulm (t1, t1, t2, skey->p);
++
++  mpi_free (r);
++  mpi_free (t2);
++
++#else /*!USE_BLINDING*/
+
+   /* output = b/(a^x) mod p */
+-  gcry_mpi_powm( t1, a, skey->x, skey->p );
+-  mpi_invm( t1, t1, skey->p );
+-  mpi_mulm( output, b, t1, skey->p );
++  mpi_powm (t1, a, skey->x, skey->p);
++  mpi_invm (t1, t1, skey->p);
++
++#endif /*!USE_BLINDING*/
++
++  mpi_mulm (output, b, t1, skey->p);
++
+ #if 0
+   if( DBG_CIPHER )
+     {
+@@ -502,7 +541,7 @@
+       log_mpidump("elg decrypted M= ", output);
+     }
+ #endif
+-  mpi_free(t1);
++  mpi_free (t1);
+ }
+
+
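The blinding above can be checked independently of libgcrypt. The following is a minimal, self-contained sketch in plain C with a toy 64-bit modulus instead of gcry_mpi_t; the helpers powmod/invmod and all constants are made up for the illustration and are not part of the patched code. It only demonstrates the identity the new decrypt() relies on: b * r^x * ((a*r)^x)^-1 == b * (a^x)^-1 (mod p), so folding a random blinder r into the exponentiations does not change the recovered plaintext.

    /* Toy check of the ciphertext-blinding identity used by the patched
       decrypt(): b * r^x * ((a*r)^x)^-1 == b * (a^x)^-1 (mod p).
       Small prime so plain 64-bit arithmetic is enough; powmod/invmod are
       ad-hoc helpers, not libgcrypt functions. */
    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    static uint64_t mulmod (uint64_t a, uint64_t b, uint64_t p)
    {
      return (a % p) * (b % p) % p;   /* safe: p < 2^32 */
    }

    static uint64_t powmod (uint64_t b, uint64_t e, uint64_t p)
    {
      uint64_t r = 1;
      for (b %= p; e; e >>= 1, b = mulmod (b, b, p))
        if (e & 1)
          r = mulmod (r, b, p);
      return r;
    }

    static uint64_t invmod (uint64_t a, uint64_t p)   /* p must be prime */
    {
      return powmod (a, p - 2, p);
    }

    int main (void)
    {
      const uint64_t p = 1000003;                /* toy prime modulus     */
      const uint64_t g = 2, x = 123457;          /* generator, secret key */
      const uint64_t m = 424242, k = 987;        /* message, enc. nonce   */
      uint64_t a = powmod (g, k, p);                               /* c1 = g^k */
      uint64_t b = mulmod (m, powmod (powmod (g, x, p), k, p), p); /* c2       */

      /* Old code path: m = b / a^x mod p. */
      uint64_t plain = mulmod (b, invmod (powmod (a, x, p), p), p);

      /* New code path: blind A with a random R before exponentiating. */
      uint64_t r  = 55555;                                     /* random blinder */
      uint64_t t1 = powmod (r, x, p);                          /* r^x            */
      uint64_t t2 = invmod (powmod (mulmod (a, r, p), x, p), p); /* ((a*r)^x)^-1 */
      uint64_t blinded = mulmod (b, mulmod (t1, t2, p), p);

      assert (plain == m && blinded == m);
      printf ("both paths recover m = %llu\n", (unsigned long long) m);
      return 0;
    }

The extra work means the secret-exponent exponentiation no longer runs directly on the attacker-supplied ciphertext value, which is what makes the side-channel probing behind CVE-2014-3591 harder.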
only in patch2:
unchanged:
--- libgcrypt11-1.4.4.orig/debian/patches/25-CVE-2015-0837.diff
+++ libgcrypt11-1.4.4/debian/patches/25-CVE-2015-0837.diff
@@ -0,0 +1,215 @@
+Description: fix sidechannel attack via timing variations in mpi_powm
+Origin: backport, http://git.gnupg.org/cgi-bin/gitweb.cgi?p=libgcrypt.git;a=commit;h=deb6f231ba85f65283c9e1deb3e2dea3b6ca46dc
+Origin: backport, http://git.gnupg.org/cgi-bin/gitweb.cgi?p=libgcrypt.git;a=commit;h=d9f002899d26dc64f1502ae5050632340a4780fe
+Origin: backport, http://git.gnupg.org/cgi-bin/gitweb.cgi?p=libgcrypt.git;a=commit;h=5e72b6c76ebee720f69b8a5c212f52d38eb50287
+
+diff -Nur -x '*.orig' -x '*~' libgcrypt11-1.4.4/mpi/mpi-pow.c libgcrypt11-1.4.4.new/mpi/mpi-pow.c
+--- libgcrypt11-1.4.4/mpi/mpi-pow.c 2015-03-26 08:54:09.500582438 -0400
++++ libgcrypt11-1.4.4.new/mpi/mpi-pow.c 2015-03-26 08:54:18.368649838 -0400
+@@ -377,7 +377,7 @@
+     *xsize_p = rsize + ssize;
+ }
+ 
+-#define SIZE_B_2I3 ((1 << (5 - 1)) - 1)
++#define SIZE_PRECOMP ((1 << (5 - 1)))
+ 
+ /****************
+  * RES = BASE ^ EXPO mod MOD
+@@ -413,11 +413,12 @@
+   unsigned int bp_nlimbs = 0;
+   unsigned int ep_nlimbs = 0;
+   unsigned int xp_nlimbs = 0;
+-  mpi_ptr_t b_2i3[SIZE_B_2I3];  /* Pre-computed array: BASE^3, ^5, ^7, ... */
+-  mpi_size_t b_2i3size[SIZE_B_2I3];
++  mpi_ptr_t precomp[SIZE_PRECOMP]; /* Pre-computed array: BASE^1, ^3, ^5, ... */
++  mpi_size_t precomp_size[SIZE_PRECOMP];
+   mpi_size_t W;
+   mpi_ptr_t base_u;
+   mpi_size_t base_u_size;
++  mpi_size_t max_u_size;
+ 
+   esize = expo->nlimbs;
+   msize = mod->nlimbs;
+@@ -536,7 +537,7 @@
+ 
+   /* Main processing. */
+   {
+-    mpi_size_t i, j;
++    mpi_size_t i, j, k;
+     mpi_ptr_t xp;
+     mpi_size_t xsize;
+     int c;
+@@ -551,33 +552,30 @@
+     memset( &karactx, 0, sizeof karactx );
+     negative_result = (ep[0] & 1) && bsign;
+ 
+-    /* Precompute B_2I3[], BASE^(2 * i + 3), BASE^3, ^5, ^7, ... */
++    /* Precompute PRECOMP[], BASE^(2 * i + 1), BASE^1, ^3, ^5, ... */
+     if (W > 1)                  /* X := BASE^2 */
+       mul_mod (xp, &xsize, bp, bsize, bp, bsize, mp, msize, &karactx);
+-    for (i = 0; i < (1 << (W - 1)) - 1; i++)
+-      { /* B_2I3[i] = BASE^(2 * i + 3) */
+-        if (i == 0)
+-          {
+-            base_u = bp;
+-            base_u_size = bsize;
+-          }
+-        else
+-          {
+-            base_u = b_2i3[i-1];
+-            base_u_size = b_2i3size[i-1];
+-          }
+-
++    base_u = precomp[0] = mpi_alloc_limb_space (bsize, esec);
++    base_u_size = max_u_size = precomp_size[0] = bsize;
++    MPN_COPY (precomp[0], bp, bsize);
++    for (i = 1; i < (1 << (W - 1)); i++)
++      { /* PRECOMP[i] = BASE^(2 * i + 1) */
+         if (xsize >= base_u_size)
+           mul_mod (rp, &rsize, xp, xsize, base_u, base_u_size,
+                    mp, msize, &karactx);
+         else
+           mul_mod (rp, &rsize, base_u, base_u_size, xp, xsize,
+                    mp, msize, &karactx);
+-        b_2i3[i] = mpi_alloc_limb_space (rsize, esec);
+-        b_2i3size[i] = rsize;
+-        MPN_COPY (b_2i3[i], rp, rsize);
++        base_u = precomp[i] = mpi_alloc_limb_space (rsize, esec);
++        base_u_size = precomp_size[i] = rsize;
++        if (max_u_size < base_u_size)
++          max_u_size = base_u_size;
++        MPN_COPY (precomp[i], rp, rsize);
+       }
+ 
++    base_u = mpi_alloc_limb_space (max_u_size, esec);
++    MPN_ZERO (base_u, max_u_size);
++
+     i = esize - 1;
+ 
+     /* Main loop.
+@@ -663,15 +661,23 @@
+             rsize = xsize;
+           }
+ 
+-          if (e0 == 0)
++          /*
++           *  base_u <= precomp[e0]
++           *  base_u_size <= precomp_size[e0]
++           */
++          base_u_size = 0;
++          for (k = 0; k < (1<< (W - 1)); k++)
+             {
+-              base_u = bp;
+-              base_u_size = bsize;
+-            }
+-          else
+-            {
+-              base_u = b_2i3[e0 - 1];
+-              base_u_size = b_2i3size[e0 -1];
++              struct gcry_mpi w, u;
++              w.alloced = w.nlimbs = precomp_size[k];
++              u.alloced = u.nlimbs = precomp_size[k];
++              w.sign = u.sign = 0;
++              w.flags = u.flags = 0;
++              w.d = base_u;
++              u.d = precomp[k];
++
++              mpi_set_cond (&w, &u, k == e0);
++              base_u_size |= (precomp_size[k] & ((mpi_size_t)0 - (k == e0)) );
+             }
+ 
+           mul_mod (xp, &xsize, rp, rsize, base_u, base_u_size,
+@@ -699,15 +705,23 @@
+ 
+       if (e != 0)
+         {
+-          if ((e>>1) == 0)
++          /*
++           *  base_u <= precomp[(e>>1)]
++           *  base_u_size <= precomp_size[(e>>1)]
++           */
++          base_u_size = 0;
++          for (k = 0; k < (1<< (W - 1)); k++)
+             {
+-              base_u = bp;
+-              base_u_size = bsize;
+-            }
+-          else
+-            {
+-              base_u = b_2i3[(e>>1) - 1];
+-              base_u_size = b_2i3size[(e>>1) -1];
++              struct gcry_mpi w, u;
++              w.alloced = w.nlimbs = precomp_size[k];
++              u.alloced = u.nlimbs = precomp_size[k];
++              w.sign = u.sign = 0;
++              w.flags = u.flags = 0;
++              w.d = base_u;
++              u.d = precomp[k];
++
++              mpi_set_cond (&w, &u, k == (e>>1));
++              base_u_size |= (precomp_size[k] & ((mpi_size_t)0 - (k == (e>>1))) );
+             }
+ 
+           mul_mod (xp, &xsize, rp, rsize, base_u, base_u_size,
+@@ -757,8 +771,9 @@
+     MPN_NORMALIZE (rp, rsize);
+ 
+     _gcry_mpih_release_karatsuba_ctx (&karactx );
+-    for (i = 0; i < (1 << (W - 1)) - 1; i++)
+-      _gcry_mpi_free_limb_space( b_2i3[i], esec ? b_2i3size[i] : 0 );
++    for (i = 0; i < (1 << (W - 1)); i++)
++      _gcry_mpi_free_limb_space( precomp[i], esec ? precomp_size[i] : 0 );
++    _gcry_mpi_free_limb_space (base_u, esec ? max_u_size : 0);
+   }
+ 
+   /* Fixup for negative results.  */
+diff -Nur -x '*.orig' -x '*~' libgcrypt11-1.4.4/mpi/mpiutil.c libgcrypt11-1.4.4.new/mpi/mpiutil.c
+--- libgcrypt11-1.4.4/mpi/mpiutil.c 2008-09-03 06:04:41.000000000 -0400
++++ libgcrypt11-1.4.4.new/mpi/mpiutil.c 2015-03-26 08:54:18.368649838 -0400
+@@ -387,6 +387,31 @@
+           / BITS_PER_MPI_LIMB );
+ }
+ 
++gcry_mpi_t
++_gcry_mpi_set_cond (gcry_mpi_t w, const gcry_mpi_t u, unsigned long set)
++{
++  mpi_size_t i;
++  mpi_size_t nlimbs = u->alloced;
++  mpi_limb_t mask = ((mpi_limb_t)0) - !!set;
++  mpi_limb_t x;
++
++  if (w->alloced != u->alloced)
++    log_bug ("mpi_set_cond: different sizes\n");
++
++  for (i = 0; i < nlimbs; i++)
++    {
++      x = mask & (w->d[i] ^ u->d[i]);
++      w->d[i] = w->d[i] ^ x;
++    }
++
++  x = mask & (w->nlimbs ^ u->nlimbs);
++  w->nlimbs = w->nlimbs ^ x;
++
++  x = mask & (w->sign ^ u->sign);
++  w->sign = w->sign ^ x;
++  return w;
++}
++
+ 
+ gcry_mpi_t
+ gcry_mpi_snew( unsigned int nbits )
+diff -Nur -x '*.orig' -x '*~' libgcrypt11-1.4.4/src/mpi.h libgcrypt11-1.4.4.new/src/mpi.h
+--- libgcrypt11-1.4.4/src/mpi.h 2008-08-19 12:22:51.000000000 -0400
++++ libgcrypt11-1.4.4.new/src/mpi.h 2015-03-26 08:54:18.368649838 -0400
+@@ -115,8 +115,11 @@
+ #define mpi_swap(a,b)      _gcry_mpi_swap ((a),(b))
+ #define mpi_new(n)         _gcry_mpi_new ((n))
+ #define mpi_snew(n)        _gcry_mpi_snew ((n))
++#define mpi_set_cond(w,u,set) _gcry_mpi_set_cond ((w),(u),(set))
+ 
+ void _gcry_mpi_clear( gcry_mpi_t a );
++gcry_mpi_t _gcry_mpi_set_cond (gcry_mpi_t w, const gcry_mpi_t u,
++                               unsigned long swap);
+ gcry_mpi_t _gcry_mpi_alloc_like( gcry_mpi_t a );
+ gcry_mpi_t _gcry_mpi_alloc_set_ui( unsigned long u);
+ gcry_err_code_t _gcry_mpi_get_ui (gcry_mpi_t w, ulong *u);
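The `_gcry_mpi_set_cond` helper added to mpiutil.c above selects a value with a mask instead of a branch, so the same loads, XORs and stores are executed whether or not the copy takes effect. A stand-alone sketch of the same masking idea on plain word arrays (illustrative names, not the libgcrypt API):

    /* Constant-time conditional copy, the idea behind _gcry_mpi_set_cond:
       if SET is non-zero W becomes a copy of U, otherwise W is untouched,
       and in both cases exactly the same memory accesses happen. */
    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    static void ct_copy_cond (uint64_t *w, const uint64_t *u, size_t n,
                              unsigned long set)
    {
      uint64_t mask = (uint64_t) 0 - !!set;     /* all-zero or all-one bits */
      for (size_t i = 0; i < n; i++)
        w[i] ^= mask & (w[i] ^ u[i]);           /* w[i] = set ? u[i] : w[i] */
    }

    int main (void)
    {
      uint64_t w[2] = { 1, 2 }, u[2] = { 7, 8 };

      ct_copy_cond (w, u, 2, 0);   /* no-op, but still touches every word */
      printf ("%llu %llu\n", (unsigned long long) w[0], (unsigned long long) w[1]);

      ct_copy_cond (w, u, 2, 1);   /* now w becomes a copy of u */
      printf ("%llu %llu\n", (unsigned long long) w[0], (unsigned long long) w[1]);
      return 0;
    }

In the mpi-pow.c hunks above, this is applied to every precomputed table entry with the flag `k == e0`, so neither the branch predictor nor the data cache reveals which window value was actually used.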
only in patch2:
unchanged:
--- libgcrypt11-1.4.4.orig/debian/patches/21-CVE-2013-4242.diff
+++ libgcrypt11-1.4.4/debian/patches/21-CVE-2013-4242.diff
@@ -0,0 +1,68 @@
+From: Werner Koch
+Date: Thu, 25 Jul 2013 09:17:52 +0000 (+0200)
+Subject: Mitigate a flush+reload cache attack on RSA secret exponents.
+X-Git-Url: http://git.gnupg.org/cgi-bin/gitweb.cgi?p=libgcrypt.git;a=commitdiff_plain;h=287bf0e543f244d784cf8b58340bf0ab3c6aba97;hp=37d0a1ebdc2dc74df4fb6bf0621045018122a68f
+
+Mitigate a flush+reload cache attack on RSA secret exponents.
+
+* mpi/mpi-pow.c (gcry_mpi_powm): Always perfrom the mpi_mul for
+exponents in secure memory.
+--
+
+The attack is published as http://eprint.iacr.org/2013/448 :
+
+Flush+Reload: a High Resolution, Low Noise, L3 Cache Side-Channel
+Attack by Yuval Yarom and Katrina Falkner. 18 July 2013.
+
+  Flush+Reload is a cache side-channel attack that monitors access to
+  data in shared pages. In this paper we demonstrate how to use the
+  attack to extract private encryption keys from GnuPG. The high
+  resolution and low noise of the Flush+Reload attack enables a spy
+  program to recover over 98% of the bits of the private key in a
+  single decryption or signing round. Unlike previous attacks, the
+  attack targets the last level L3 cache. Consequently, the spy
+  program and the victim do not need to share the execution core of
+  the CPU. The attack is not limited to a traditional OS and can be
+  used in a virtualised environment, where it can attack programs
+  executing in a different VM.
+
+(cherry picked from commit 55237c8f6920c6629debd23db65e90b42a3767de)
+---
+
+diff -Naurp libgcrypt11-1.4.4.orig/mpi/mpi-pow.c libgcrypt11-1.4.4/mpi/mpi-pow.c
+--- libgcrypt11-1.4.4.orig/mpi/mpi-pow.c 2008-12-02 23:59:30.000000000 -0800
++++ libgcrypt11-1.4.4/mpi/mpi-pow.c 2013-07-30 17:45:37.000000000 -0700
+@@ -1,6 +1,7 @@
+ /* mpi-pow.c  -  MPI functions for exponentiation
+  * Copyright (C) 1994, 1996, 1998, 2000, 2002
+  *               2003 Free Software Foundation, Inc.
++ *               2013 g10 Code GmbH
+  *
+  * This file is part of Libgcrypt.
+  *
+@@ -231,7 +232,13 @@ gcry_mpi_powm (gcry_mpi_t res,
+               tp = rp; rp = xp; xp = tp;
+               rsize = xsize;
+ 
+-              if ( (mpi_limb_signed_t)e < 0 )
++              /* To mitigate the Yarom/Falkner flush+reload cache
++               * side-channel attack on the RSA secret exponent, we do
++               * the multiplication regardless of the value of the
++               * high-bit of E. But to avoid this performance penalty
++               * we do it only if the exponent has been stored in secure
++               * memory and we can thus assume it is a secret exponent. */
++              if (esec || (mpi_limb_signed_t)e < 0)
+                 {
+                   /*mpih_mul( xp, rp, rsize, bp, bsize );*/
+                   if( bsize < KARATSUBA_THRESHOLD )
+@@ -246,7 +253,9 @@ gcry_mpi_powm (gcry_mpi_t res,
+                       _gcry_mpih_divrem(xp + msize, 0, xp, xsize, mp, msize);
+                       xsize = msize;
+                     }
+-
++                }
++              if ( (mpi_limb_signed_t)e < 0 )
++                {
+                   tp = rp; rp = xp; xp = tp;
+                   rsize = xsize;
+                 }
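The change above makes the left-to-right binary exponentiation execute the multiplication for every exponent bit whenever the exponent lives in secure memory, so the instruction trace no longer mirrors the key bits. A toy sketch of the same "square-and-multiply-always" shape, with 64-bit arithmetic and hypothetical helper names rather than the libgcrypt MPI calls:

    /* "Multiply always" left-to-right exponentiation: the multiplication is
       computed for every exponent bit and its result is kept only when the
       bit is set, so the operation sequence no longer depends on the key. */
    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    static uint64_t powmod_always (uint64_t b, uint64_t e, uint64_t p)
    {
      uint64_t r = 1;
      b %= p;
      for (int i = 63; i >= 0; i--)
        {
          uint64_t bit = (e >> i) & 1;
          r = (r * r) % p;                /* square                          */
          uint64_t t = (r * b) % p;       /* multiply, even for a 0 bit      */
          r = bit ? t : r;                /* keep it only when the bit is 1  */
        }
      return r;
    }

    static uint64_t powmod_ref (uint64_t b, uint64_t e, uint64_t p)
    {
      uint64_t r = 1;
      for (b %= p; e; e >>= 1, b = (b * b) % p)
        if (e & 1)
          r = (r * b) % p;
      return r;
    }

    int main (void)
    {
      const uint64_t p = 1000003;       /* toy prime, small enough for 64 bits */
      assert (powmod_always (5, 117, p) == powmod_ref (5, 117, p));
      puts ("multiply-always and plain square-and-multiply agree");
      return 0;
    }

The selection `bit ? t : r` is still a branch in this sketch; the real patch only forces the multiplication, and the windowed rewrite plus mpi_set_cond in 25-CVE-2015-0837.diff (earlier in this debdiff) remove the remaining data-dependent accesses.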
only in patch2:
unchanged:
--- libgcrypt11-1.4.4.orig/debian/patches/23-CVE-2014-5270.diff
+++ libgcrypt11-1.4.4/debian/patches/23-CVE-2014-5270.diff
@@ -0,0 +1,493 @@
+Backport of:
+
+From 62e8e1283268f1d3b6d0cfb2fc4e7835bbcdaab6 Mon Sep 17 00:00:00 2001
+From: NIIBE Yutaka
+Date: Wed, 2 Oct 2013 09:27:09 +0900
+Subject: [PATCH] mpi: mpi-pow improvement.
+
+* mpi/mpi-pow.c (gcry_mpi_powm): New implementation of left-to-right
+k-ary exponentiation.
+--
+
+Signed-off-by: NIIBE Yutaka
+
+For the Yarom/Falkner flush+reload cache side-channel attack, we
+changed the code so that it always calls the multiplication routine
+(even if we can skip it to get result). This results some performance
+regression.
+
+This change is for recovering performance with efficient algorithm.
+
+(cherry picked from commit 45aa6131e93fac89d46733b3436d960f35fb99b2)
+---
+ mpi/mpi-pow.c | 454 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 454 insertions(+)
+
+diff -Nur -x '*.orig' -x '*~' libgcrypt11-1.4.4/mpi/mpi-pow.c libgcrypt11-1.4.4.new/mpi/mpi-pow.c
+--- libgcrypt11-1.4.4/mpi/mpi-pow.c 2014-08-19 09:11:10.167328245 -0400
++++ libgcrypt11-1.4.4.new/mpi/mpi-pow.c 2014-08-19 09:12:24.527331808 -0400
+@@ -34,6 +34,14 @@
+ #include "longlong.h"
+ 
+ 
++/*
++ * When you need old implementation, please add compilation option
++ * -DUSE_ALGORITHM_SIMPLE_EXPONENTIATION
++ * or expose this line:
++#define USE_ALGORITHM_SIMPLE_EXPONENTIATION 1
++ */
++
++#if defined(USE_ALGORITHM_SIMPLE_EXPONENTIATION)
+ /****************
+  * RES = BASE ^ EXPO mod MOD
+  */
+@@ -332,4 +340,449 @@
+   if (tspace)
+     _gcry_mpi_free_limb_space( tspace, 0 );
+ }
++#else
++/**
++ * Internal function to compute
++ *
++ *    X = R * S mod M
++ *
++ * and set the size of X at the pointer XSIZE_P.
++ * Use karatsuba structure at KARACTX_P.
++ *
++ * Condition:
++ *   RSIZE >= SSIZE
++ *   Enough space for X is allocated beforehand.
++ *
++ * For generic cases, we can/should use gcry_mpi_mulm.
++ * This function is use for specific internal case.
++ */
++static void
++mul_mod (mpi_ptr_t xp, mpi_size_t *xsize_p,
++         mpi_ptr_t rp, mpi_size_t rsize,
++         mpi_ptr_t sp, mpi_size_t ssize,
++         mpi_ptr_t mp, mpi_size_t msize,
++         struct karatsuba_ctx *karactx_p)
++{
++  if( ssize < KARATSUBA_THRESHOLD )
++    _gcry_mpih_mul ( xp, rp, rsize, sp, ssize );
++  else
++    _gcry_mpih_mul_karatsuba_case (xp, rp, rsize, sp, ssize, karactx_p);
++
++  if (rsize + ssize > msize)
++    {
++      _gcry_mpih_divrem (xp + msize, 0, xp, rsize + ssize, mp, msize);
++      *xsize_p = msize;
++    }
++  else
++    *xsize_p = rsize + ssize;
++}
++
++#define SIZE_B_2I3 ((1 << (5 - 1)) - 1)
++
++/****************
++ * RES = BASE ^ EXPO mod MOD
++ *
++ * To mitigate the Yarom/Falkner flush+reload cache side-channel
++ * attack on the RSA secret exponent, we don't use the square
++ * routine but multiplication.
++ *
++ * Reference:
++ *   Handbook of Applied Cryptography
++ *     Algorithm 14.83: Modified left-to-right k-ary exponentiation
++ */
++void
++gcry_mpi_powm (gcry_mpi_t res,
++               gcry_mpi_t base, gcry_mpi_t expo, gcry_mpi_t mod)
++{
++  /* Pointer to the limbs of the arguments, their size and signs. */
++  mpi_ptr_t rp, ep, mp, bp;
++  mpi_size_t esize, msize, bsize, rsize;
++  int msign, bsign, rsign;
++  /* Flags telling the secure allocation status of the arguments. */
++  int esec, msec, bsec;
++  /* Size of the result including space for temporary values. */
++  mpi_size_t size;
++  /* Helper. */
++  int mod_shift_cnt;
++  int negative_result;
++  mpi_ptr_t mp_marker = NULL;
++  mpi_ptr_t bp_marker = NULL;
++  mpi_ptr_t ep_marker = NULL;
++  mpi_ptr_t xp_marker = NULL;
++  unsigned int mp_nlimbs = 0;
++  unsigned int bp_nlimbs = 0;
++  unsigned int ep_nlimbs = 0;
++  unsigned int xp_nlimbs = 0;
++  mpi_ptr_t b_2i3[SIZE_B_2I3];  /* Pre-computed array: BASE^3, ^5, ^7, ... */
++  mpi_size_t b_2i3size[SIZE_B_2I3];
++  mpi_size_t W;
++  mpi_ptr_t base_u;
++  mpi_size_t base_u_size;
++
++  esize = expo->nlimbs;
++  msize = mod->nlimbs;
++  size = 2 * msize;
++  msign = mod->sign;
++
++  if (esize * BITS_PER_MPI_LIMB > 512)
++    W = 5;
++  else if (esize * BITS_PER_MPI_LIMB > 256)
++    W = 4;
++  else if (esize * BITS_PER_MPI_LIMB > 128)
++    W = 3;
++  else if (esize * BITS_PER_MPI_LIMB > 64)
++    W = 2;
++  else
++    W = 1;
++
++  esec = mpi_is_secure(expo);
++  msec = mpi_is_secure(mod);
++  bsec = mpi_is_secure(base);
++
++  rp = res->d;
++  ep = expo->d;
++
++  if (!msize)
++    _gcry_divide_by_zero();
++
++  if (!esize)
++    {
++      /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 depending
++         on if MOD equals 1.  */
++      res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
++      if (res->nlimbs)
++        {
++          RESIZE_IF_NEEDED (res, 1);
++          rp = res->d;
++          rp[0] = 1;
++        }
++      res->sign = 0;
++      goto leave;
++    }
++
++  /* Normalize MOD (i.e. make its most significant bit set) as
++     required by mpn_divrem.  This will make the intermediate values
++     in the calculation slightly larger, but the correct result is
++     obtained after a final reduction using the original MOD value. */
++  mp_nlimbs = msec? msize:0;
++  mp = mp_marker = mpi_alloc_limb_space(msize, msec);
++  count_leading_zeros (mod_shift_cnt, mod->d[msize-1]);
++  if (mod_shift_cnt)
++    _gcry_mpih_lshift (mp, mod->d, msize, mod_shift_cnt);
++  else
++    MPN_COPY( mp, mod->d, msize );
++
++  bsize = base->nlimbs;
++  bsign = base->sign;
++  if (bsize > msize)
++    {
++      /* The base is larger than the module.  Reduce it.
++
++         Allocate (BSIZE + 1) with space for remainder and quotient.
++         (The quotient is (bsize - msize + 1) limbs.)  */
++      bp_nlimbs = bsec ? (bsize + 1):0;
++      bp = bp_marker = mpi_alloc_limb_space( bsize + 1, bsec );
++      MPN_COPY ( bp, base->d, bsize );
++      /* We don't care about the quotient, store it above the
++       * remainder, at BP + MSIZE.  */
++      _gcry_mpih_divrem( bp + msize, 0, bp, bsize, mp, msize );
++      bsize = msize;
++      /* Canonicalize the base, since we are going to multiply with it
++         quite a few times.  */
++      MPN_NORMALIZE( bp, bsize );
++    }
++  else
++    bp = base->d;
++
++  if (!bsize)
++    {
++      res->nlimbs = 0;
++      res->sign = 0;
++      goto leave;
++    }
++
++
++  /* Make BASE, EXPO and MOD not overlap with RES.  */
++  if ( rp == bp )
++    {
++      /* RES and BASE are identical.  Allocate temp. space for BASE.  */
++      gcry_assert (!bp_marker);
++      bp_nlimbs = bsec? bsize:0;
++      bp = bp_marker = mpi_alloc_limb_space( bsize, bsec );
++      MPN_COPY(bp, rp, bsize);
++    }
++  if ( rp == ep )
++    {
++      /* RES and EXPO are identical.  Allocate temp. space for EXPO.  */
++      ep_nlimbs = esec? esize:0;
++      ep = ep_marker = mpi_alloc_limb_space( esize, esec );
++      MPN_COPY(ep, rp, esize);
++    }
++  if ( rp == mp )
++    {
++      /* RES and MOD are identical.  Allocate temporary space for MOD.*/
++      gcry_assert (!mp_marker);
++      mp_nlimbs = msec?msize:0;
++      mp = mp_marker = mpi_alloc_limb_space( msize, msec );
++      MPN_COPY(mp, rp, msize);
++    }
++
++  /* Copy base to the result.  */
++  if (res->alloced < size)
++    {
++      mpi_resize (res, size);
++      rp = res->d;
++    }
++
++  /* Main processing.  */
++  {
++    mpi_size_t i, j;
++    mpi_ptr_t xp;
++    mpi_size_t xsize;
++    int c;
++    mpi_limb_t e;
++    mpi_limb_t carry_limb;
++    struct karatsuba_ctx karactx;
++    mpi_ptr_t tp;
++
++    xp_nlimbs = msec? (2 * (msize + 1)):0;
++    xp = xp_marker = mpi_alloc_limb_space( 2 * (msize + 1), msec );
++
++    memset( &karactx, 0, sizeof karactx );
++    negative_result = (ep[0] & 1) && bsign;
++
++    /* Precompute B_2I3[], BASE^(2 * i + 3), BASE^3, ^5, ^7, ... */
++    if (W > 1)                  /* X := BASE^2 */
++      mul_mod (xp, &xsize, bp, bsize, bp, bsize, mp, msize, &karactx);
++    for (i = 0; i < (1 << (W - 1)) - 1; i++)
++      { /* B_2I3[i] = BASE^(2 * i + 3) */
++        if (i == 0)
++          {
++            base_u = bp;
++            base_u_size = bsize;
++          }
++        else
++          {
++            base_u = b_2i3[i-1];
++            base_u_size = b_2i3size[i-1];
++          }
++
++        if (xsize >= base_u_size)
++          mul_mod (rp, &rsize, xp, xsize, base_u, base_u_size,
++                   mp, msize, &karactx);
++        else
++          mul_mod (rp, &rsize, base_u, base_u_size, xp, xsize,
++                   mp, msize, &karactx);
++        b_2i3[i] = mpi_alloc_limb_space (rsize, esec);
++        b_2i3size[i] = rsize;
++        MPN_COPY (b_2i3[i], rp, rsize);
++      }
++
++    i = esize - 1;
++
++    /* Main loop.
++
++       Make the result be pointed to alternately by XP and RP.  This
++       helps us avoid block copying, which would otherwise be
++       necessary with the overlap restrictions of
++       _gcry_mpih_divmod.  With 50% probability the result after this
++       loop will be in the area originally pointed by RP (==RES->d),
++       and with 50% probability in the area originally pointed to by XP.  */
++    rsign = 0;
++    if (W == 1)
++      {
++        rsize = bsize;
++      }
++    else
++      {
++        rsize = msize;
++        MPN_ZERO (rp, rsize);
++      }
++    MPN_COPY ( rp, bp, bsize );
++
++    e = ep[i];
++    count_leading_zeros (c, e);
++    e = (e << c) << 1;
++    c = BITS_PER_MPI_LIMB - 1 - c;
++
++    j = 0;
++
++    for (;;)
++      if (e == 0)
++        {
++          j += c;
++          i--;
++          if ( i < 0 )
++            {
++              c = 0;
++              break;
++            }
++
++          e = ep[i];
++          c = BITS_PER_MPI_LIMB;
++        }
++      else
++        {
++          int c0;
++          mpi_limb_t e0;
++
++          count_leading_zeros (c0, e);
++          e = (e << c0);
++          c -= c0;
++          j += c0;
++
++          if (c >= W)
++            {
++              e0 = (e >> (BITS_PER_MPI_LIMB - W));
++              e = (e << W);
++              c -= W;
++            }
++          else
++            {
++              i--;
++              if ( i < 0 )
++                {
++                  e = (e >> (BITS_PER_MPI_LIMB - c));
++                  break;
++                }
++
++              c0 = c;
++              e0 = (e >> (BITS_PER_MPI_LIMB - W))
++                | (ep[i] >> (BITS_PER_MPI_LIMB - W + c0));
++              e = (ep[i] << (W - c0));
++              c = BITS_PER_MPI_LIMB - W + c0;
++            }
++
++          count_trailing_zeros (c0, e0);
++          e0 = (e0 >> c0) >> 1;
++
++          for (j += W - c0; j; j--)
++            {
++              mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx);
++              tp = rp; rp = xp; xp = tp;
++              rsize = xsize;
++            }
++
++          if (e0 == 0)
++            {
++              base_u = bp;
++              base_u_size = bsize;
++            }
++          else
++            {
++              base_u = b_2i3[e0 - 1];
++              base_u_size = b_2i3size[e0 -1];
++            }
++
++          mul_mod (xp, &xsize, rp, rsize, base_u, base_u_size,
++                   mp, msize, &karactx);
++          tp = rp; rp = xp; xp = tp;
++          rsize = xsize;
++
++          j = c0;
++        }
++
++    if (c != 0)
++      {
++        j += c;
++        count_trailing_zeros (c, e);
++        e = (e >> c);
++        j -= c;
++      }
++
++    while (j--)
++      {
++        mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx);
++        tp = rp; rp = xp; xp = tp;
++        rsize = xsize;
++      }
++
++    if (e != 0)
++      {
++        if ((e>>1) == 0)
++          {
++            base_u = bp;
++            base_u_size = bsize;
++          }
++        else
++          {
++            base_u = b_2i3[(e>>1) - 1];
++            base_u_size = b_2i3size[(e>>1) -1];
++          }
++
++        mul_mod (xp, &xsize, rp, rsize, base_u, base_u_size,
++                 mp, msize, &karactx);
++        tp = rp; rp = xp; xp = tp;
++        rsize = xsize;
++
++        for (; c; c--)
++          {
++            mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx);
++            tp = rp; rp = xp; xp = tp;
++            rsize = xsize;
++          }
++      }
++
++    /* We shifted MOD, the modulo reduction argument, left
++       MOD_SHIFT_CNT steps.  Adjust the result by reducing it with the
++       original MOD.
++
++       Also make sure the result is put in RES->d (where it already
++       might be, see above).  */
++    if ( mod_shift_cnt )
++      {
++        carry_limb = _gcry_mpih_lshift( res->d, rp, rsize, mod_shift_cnt);
++        rp = res->d;
++        if ( carry_limb )
++          {
++            rp[rsize] = carry_limb;
++            rsize++;
++          }
++      }
++    else if (res->d != rp)
++      {
++        MPN_COPY (res->d, rp, rsize);
++        rp = res->d;
++      }
++
++    if ( rsize >= msize )
++      {
++        _gcry_mpih_divrem(rp + msize, 0, rp, rsize, mp, msize);
++        rsize = msize;
++      }
+
++    /* Remove any leading zero words from the result.  */
++    if ( mod_shift_cnt )
++      _gcry_mpih_rshift( rp, rp, rsize, mod_shift_cnt);
++    MPN_NORMALIZE (rp, rsize);
++
++    _gcry_mpih_release_karatsuba_ctx (&karactx );
++    for (i = 0; i < (1 << (W - 1)) - 1; i++)
++      _gcry_mpi_free_limb_space( b_2i3[i], esec ? b_2i3size[i] : 0 );
++  }
++
++  /* Fixup for negative results.  */
++  if ( negative_result && rsize )
++    {
++      if ( mod_shift_cnt )
++        _gcry_mpih_rshift( mp, mp, msize, mod_shift_cnt);
++      _gcry_mpih_sub( rp, mp, msize, rp, rsize);
++      rsize = msize;
++      rsign = msign;
++      MPN_NORMALIZE(rp, rsize);
++    }
++  gcry_assert (res->d == rp);
++  res->nlimbs = rsize;
++  res->sign = rsign;
++
++ leave:
++  if (mp_marker)
++    _gcry_mpi_free_limb_space( mp_marker, mp_nlimbs );
++  if (bp_marker)
++    _gcry_mpi_free_limb_space( bp_marker, bp_nlimbs );
++  if (ep_marker)
++    _gcry_mpi_free_limb_space( ep_marker, ep_nlimbs );
++  if (xp_marker)
++    _gcry_mpi_free_limb_space( xp_marker, xp_nlimbs );
++}
++#endif
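23-CVE-2014-5270.diff replaces the bit-at-a-time loop with the left-to-right k-ary method of Handbook of Applied Cryptography Algorithm 14.83, driving the exponentiation from a table of precomputed powers. The sketch below shows the windowed idea in miniature: a fixed 4-bit window and a full 16-entry table over a toy 64-bit modulus. The names are illustrative and this is not the sliding-window code above, which keeps only odd powers and skips runs of zero bits.

    /* Left-to-right fixed-window exponentiation, the shape of the k-ary
       method the patch introduces.  Toy 64-bit arithmetic, small modulus. */
    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    #define WBITS 4

    static uint64_t powmod_window (uint64_t b, uint64_t e, uint64_t p)
    {
      uint64_t table[1 << WBITS];
      uint64_t r = 1;

      table[0] = 1;
      for (int i = 1; i < (1 << WBITS); i++)     /* table[i] = b^i mod p  */
        table[i] = (table[i - 1] * (b % p)) % p;

      for (int i = 64 - WBITS; i >= 0; i -= WBITS)
        {
          for (int s = 0; s < WBITS; s++)        /* r = r^(2^WBITS) mod p */
            r = (r * r) % p;
          r = (r * table[(e >> i) & ((1 << WBITS) - 1)]) % p;
        }
      return r;
    }

    static uint64_t powmod_ref (uint64_t b, uint64_t e, uint64_t p)
    {
      uint64_t r = 1;
      for (b %= p; e; e >>= 1, b = (b * b) % p)
        if (e & 1)
          r = (r * b) % p;
      return r;
    }

    int main (void)
    {
      const uint64_t p = 1000003, base = 7, e = 0xdeadbeefcafeULL;
      assert (powmod_window (base, e, p) == powmod_ref (base, e, p));
      puts ("windowed and binary exponentiation agree");
      return 0;
    }

Note that the lookup `table[(e >> i) & 0xF]` is still indexed by secret exponent bits in this sketch; that residual cache leak is exactly what 25-CVE-2015-0837.diff, earlier in this debdiff, removes with the mpi_set_cond scan over the whole table.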
only in patch2:
unchanged:
--- libgcrypt11-1.4.4.orig/debian/patches/22-add_gcry_divide_by_zero.diff
+++ libgcrypt11-1.4.4/debian/patches/22-add_gcry_divide_by_zero.diff
@@ -0,0 +1,90 @@
+Backport of:
+
+From 6c3598f1f6a6f2548b60a31ce3c0dd9885558a4f Mon Sep 17 00:00:00 2001
+From: Xi Wang
+Date: Tue, 14 Aug 2012 18:54:40 -0400
+Subject: [PATCH] Replace deliberate division by zero with
+ _gcry_divide_by_zero.
+
+* mpi/mpi-pow.c: Replace 1 / msize.
+* mpi/mpih-div.c: Replace 1 / dsize.
+* src/misc.c: Add _gcry_divide_by_zero.
+--
+
+1) Division by zero doesn't "provoke a signal" on architectures
+   like PowerPC.
+
+2) C compilers like clang will optimize away these divisions, even
+   though the code tries "to make the compiler not remove" them.
+
+This patch redirects these cases to _gcry_divide_by_zero.
+
+(cherry picked from commit 2c54c4da19d3a79e9f749740828026dd41f0521a)
+---
+ mpi/mpi-pow.c  | 2 +-
+ mpi/mpih-div.c | 5 ++---
+ src/g10lib.h   | 2 ++
+ src/misc.c     | 8 ++++++++
+ 4 files changed, 13 insertions(+), 4 deletions(-)
+
+diff -Nur -x '*.orig' -x '*~' libgcrypt11-1.4.4/mpi/mpih-div.c libgcrypt11-1.4.4.new/mpi/mpih-div.c
+--- libgcrypt11-1.4.4/mpi/mpih-div.c 2008-08-19 11:20:03.000000000 -0400
++++ libgcrypt11-1.4.4.new/mpi/mpih-div.c 2014-08-19 09:10:35.615326589 -0400
+@@ -212,9 +212,8 @@
+ 
+     switch(dsize) {
+       case 0:
+-	/* We are asked to divide by zero, so go ahead and do it!  (To make
+-	   the compiler not remove this statement, return the value.)  */
+-	return 1 / dsize;
++	_gcry_divide_by_zero();
++	break;
+ 
+       case 1:
+ 	{
+diff -Nur -x '*.orig' -x '*~' libgcrypt11-1.4.4/mpi/mpi-pow.c libgcrypt11-1.4.4.new/mpi/mpi-pow.c
+--- libgcrypt11-1.4.4/mpi/mpi-pow.c 2014-08-19 09:10:17.755325733 -0400
++++ libgcrypt11-1.4.4.new/mpi/mpi-pow.c 2014-08-19 09:10:35.615326589 -0400
+@@ -78,7 +78,7 @@
+   ep = expo->d;
+ 
+   if (!msize)
+-    msize = 1 / msize;	    /* Provoke a signal.  */
++    _gcry_divide_by_zero();
+ 
+   if (!esize)
+     {
+diff -Nur -x '*.orig' -x '*~' libgcrypt11-1.4.4/src/g10lib.h libgcrypt11-1.4.4.new/src/g10lib.h
+--- libgcrypt11-1.4.4/src/g10lib.h 2008-11-26 05:11:44.000000000 -0500
++++ libgcrypt11-1.4.4.new/src/g10lib.h 2014-08-19 09:10:35.615326589 -0400
+@@ -101,6 +101,8 @@
+ void _gcry_assert_failed (const char *expr, const char *file, int line);
+ #endif
+ 
++void _gcry_divide_by_zero (void) JNLIB_GCC_A_NR;
++
+ const char *_gcry_gettext (const char *key) GCC_ATTR_FORMAT_ARG(1);
+ void _gcry_fatal_error(int rc, const char *text ) JNLIB_GCC_A_NR;
+ void _gcry_log( int level, const char *fmt, ... ) JNLIB_GCC_A_PRINTF(2,3);
+diff -Nur -x '*.orig' -x '*~' libgcrypt11-1.4.4/src/misc.c libgcrypt11-1.4.4.new/src/misc.c
+--- libgcrypt11-1.4.4/src/misc.c 2008-09-03 06:04:42.000000000 -0400
++++ libgcrypt11-1.4.4.new/src/misc.c 2014-08-19 09:10:35.615326589 -0400
+@@ -19,6 +19,7 @@
+  */
+ 
+ #include
++#include <errno.h>
+ #include
+ #include
+ #include
+@@ -294,3 +295,10 @@
+   if (bytes > 0)
+     _gcry_burn_stack (bytes);
+ }
++
++void
++_gcry_divide_by_zero (void)
++{
++  errno = EDOM;
++  _gcry_fatal_error (gpg_err_code_from_errno (errno), "divide by zero");
++}
only in patch2:
unchanged:
--- libgcrypt11-1.4.4.orig/debian/patches/no_global_init_during_thread_callbacks.diff
+++ libgcrypt11-1.4.4/debian/patches/no_global_init_during_thread_callbacks.diff
@@ -0,0 +1,12 @@
+diff -Nur -x '*.orig' -x '*~' libgcrypt11/src/global.c libgcrypt11.new/src/global.c
+--- libgcrypt11/src/global.c 2012-05-24 16:25:08.500282000 -0400
++++ libgcrypt11.new/src/global.c 2012-05-24 16:31:09.986542239 -0400
+@@ -440,8 +440,6 @@
+ 
+     case GCRYCTL_SET_THREAD_CBS:
+       err = ath_install (va_arg (arg_ptr, void *), any_init_done);
+-      if (! err)
+-        global_init ();
+       break;
+ 
+     case GCRYCTL_FAST_POLL:
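The final patch only matters for applications that install thread callbacks before initializing libgcrypt, as GnuTLS and other library users of libgcrypt 1.4.x do. For context, a typical pthread setup against this library version looks like the sketch below; it uses the documented gcry_control/GCRY_THREAD_OPTION_PTHREAD_IMPL interface of libgcrypt 1.4/1.5 (removed in 1.6), with error handling trimmed. Build with something like `gcc init.c -lgcrypt -lpthread`.

    /* Typical libgcrypt 1.4.x/1.5.x initialization for a threaded program:
       the thread callbacks are installed first, and with the patch above
       doing so no longer runs global_init() as a side effect. */
    #include <stdio.h>
    #include <pthread.h>
    #include <errno.h>
    #include <gcrypt.h>

    GCRY_THREAD_OPTION_PTHREAD_IMPL;    /* defines gcry_threads_pthread */

    int main (void)
    {
      /* Must be the very first libgcrypt call. */
      gcry_control (GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);

      if (!gcry_check_version (GCRYPT_VERSION))
        {
          fprintf (stderr, "libgcrypt version mismatch\n");
          return 1;
        }

      gcry_control (GCRYCTL_SUSPEND_SECMEM_WARN);
      gcry_control (GCRYCTL_INIT_SECMEM, 16384, 0);
      gcry_control (GCRYCTL_RESUME_SECMEM_WARN);
      gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);

      printf ("libgcrypt %s initialized\n", gcry_check_version (NULL));
      return 0;
    }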