/* Linear Congruential pseudo-random number generator functions.

Copyright 1999-2003, 2005, 2015 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"


/* State structure for LC, the RNG_STATE() pointer in a gmp_randstate_t.

   _mp_seed holds the current seed value, in the range 0 to 2^m2exp-1.
   SIZ(_mp_seed) is fixed at BITS_TO_LIMBS(_mp_m2exp) and the value is
   padded with high zero limbs if necessary.  ALLOC(_mp_seed) is the current
   size of PTR(_mp_seed) in the usual way.  There only needs to be
   BITS_TO_LIMBS(_mp_m2exp) allocated, but the mpz functions in the
   initialization and seeding end up making it a bit more than this.

   _mp_a is the "a" multiplier, in the range 0 to 2^m2exp-1.  SIZ(_mp_a) is
   the size of the value in the normal way for an mpz_t, except that a value
   of zero is held with SIZ(_mp_a)==1 and PTR(_mp_a)[0]==0.  This makes it
   easy to call mpn_mul, and the case of a==0 is highly un-random and not
   worth any trouble to optimize.

   {_cp,_cn} is the "c" addend.  Normally _cn is 1, but when nails are in
   use a ulong can be bigger than one limb, and in this case _cn is 2 if
   necessary.  c==0 is stored as _cp[0]==0 and _cn==1, which makes it easy
   to call __GMPN_ADD.  c==0 is fairly un-random so isn't worth optimizing.

   _mp_m2exp gives the modulus, namely 2^m2exp.  We demand m2exp>=1, since
   m2exp==0 would mean no bits at all out of each iteration, which makes no
   sense.  */

typedef struct {
  mpz_t          _mp_seed;
  mpz_t          _mp_a;
  mp_size_t      _cn;
  mp_limb_t      _cp[LIMBS_PER_ULONG];
  unsigned long  _mp_m2exp;
} gmp_rand_lc_struct;
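
/* Together these fields drive the recurrence computed by lc() below,

     seed' = (a * seed + c) mod 2^m2exp

   where each step keeps the high ceil(m2exp/2) bits of seed' as output
   and discards the low half.  */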


/* lc (rp, state) -- Generate next number in LC sequence.  Return the
   number of valid bits in the result.  Discards the lower half of the
   result.  */

static unsigned long int
lc (mp_ptr rp, gmp_randstate_t rstate)
{
  mp_ptr tp, seedp, ap;
  mp_size_t ta;
  mp_size_t tn, seedn, an;
  unsigned long int m2exp;
  unsigned long int bits;
  int cy;
  mp_size_t xn;
  gmp_rand_lc_struct *p;
  TMP_DECL;

  p = (gmp_rand_lc_struct *) RNG_STATE (rstate);

  m2exp = p->_mp_m2exp;

  seedp = PTR (p->_mp_seed);
  seedn = SIZ (p->_mp_seed);

  ap = PTR (p->_mp_a);
  an = SIZ (p->_mp_a);

  /* Allocate temporary storage.  Let there be room for calculation of
     (A * seed + C) % M, or M if bigger than that.  */

  TMP_MARK;

  ta = an + seedn + 1;
  tn = BITS_TO_LIMBS (m2exp);
  if (ta <= tn) /* that is, if (ta < tn + 1) */
    {
      mp_size_t tmp = an + seedn;
      ta = tn + 1;
      tp = TMP_ALLOC_LIMBS (ta);
      MPN_ZERO (&tp[tmp], ta - tmp); /* mpn_mul won't zero it out.  */
    }
  else
    tp = TMP_ALLOC_LIMBS (ta);

  /* t = a * seed.  NOTE: an is always > 0; see initialization.  */
  ASSERT (seedn >= an && an > 0);
  mpn_mul (tp, seedp, seedn, ap, an);

  /* t = t + c.  NOTE: tn is always >= p->_cn (precondition for __GMPN_ADD);
     see initialization.  */
  ASSERT (tn >= p->_cn);
  __GMPN_ADD (cy, tp, tp, tn, p->_cp, p->_cn);

  /* t = t % m */
  tp[m2exp / GMP_NUMB_BITS] &= (CNST_LIMB (1) << m2exp % GMP_NUMB_BITS) - 1;

  /* Save result as next seed.  */
  MPN_COPY (PTR (p->_mp_seed), tp, tn);

  /* Discard the lower m2exp/2 of the result.  */
  bits = m2exp / 2;
  xn = bits / GMP_NUMB_BITS;

  tn -= xn;
  if (tn > 0)
    {
      unsigned int cnt = bits % GMP_NUMB_BITS;
      if (cnt != 0)
        {
          mpn_rshift (tp, tp + xn, tn, cnt);
          MPN_COPY_INCR (rp, tp, xn + 1);
        }
      else  /* Even limb boundary.  */
        MPN_COPY_INCR (rp, tp + xn, tn);
    }

  TMP_FREE;

  /* Return number of valid bits in the result.  */
  return (m2exp + 1) / 2;
}
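
/* For reference, one lc() step is equivalent to the following mpz-level
   sketch (illustrative only; "lc_model" and its arguments are hypothetical
   names, not part of the library):

     static unsigned long
     lc_model (mpz_t out, mpz_t seed, const mpz_t a, unsigned long c,
               unsigned long m2exp)
     {
       mpz_mul (seed, seed, a);                  // seed = a * seed
       mpz_add_ui (seed, seed, c);               // ... + c
       mpz_fdiv_r_2exp (seed, seed, m2exp);      // reduce mod 2^m2exp
       mpz_fdiv_q_2exp (out, seed, m2exp / 2);   // discard the low half
       return (m2exp + 1) / 2;                   // valid bits in "out"
     }

   The mpn code above does the same work in place on the state limbs,
   avoiding per-call mpz overhead.  */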


/* Generate nbits random bits, storing them at rp.  The bits are produced
   chunk_nbits (= m2exp/2) at a time by successive calls to lc().  */
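/* For example, with m2exp == 32 each lc() call yields chunk_nbits == 16
   valid bits, so a request for nbits == 40 takes two full 16-bit chunks
   plus a final call from which only the low 8 bits are kept (the top of
   the destination limb is masked off below).  */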
static void
randget_lc (gmp_randstate_t rstate, mp_ptr rp, unsigned long int nbits)
{
  unsigned long int rbitpos;
  int chunk_nbits;
  mp_ptr tp;
  mp_size_t tn;
  gmp_rand_lc_struct *p;
  TMP_DECL;

  p = (gmp_rand_lc_struct *) RNG_STATE (rstate);

  TMP_MARK;

  chunk_nbits = p->_mp_m2exp / 2;
  tn = BITS_TO_LIMBS (chunk_nbits);

  tp = TMP_ALLOC_LIMBS (tn);

  rbitpos = 0;
  while (rbitpos + chunk_nbits <= nbits)
    {
      mp_ptr r2p = rp + rbitpos / GMP_NUMB_BITS;

      if (rbitpos % GMP_NUMB_BITS != 0)
        {
          mp_limb_t savelimb, rcy;
          /* Target of new chunk is not bit aligned.  Use temp space
             and align things by shifting it up.  */
          lc (tp, rstate);
          savelimb = r2p[0];
          rcy = mpn_lshift (r2p, tp, tn, rbitpos % GMP_NUMB_BITS);
          r2p[0] |= savelimb;
          /* bogus */
          if ((chunk_nbits % GMP_NUMB_BITS + rbitpos % GMP_NUMB_BITS)
              > GMP_NUMB_BITS)
            r2p[tn] = rcy;
        }
      else
        {
          /* Target of new chunk is bit aligned.  Let `lc' put bits
             directly into our target variable.  */
          lc (r2p, rstate);
        }
      rbitpos += chunk_nbits;
    }

  /* Handle last [0..chunk_nbits) bits.  */
  if (rbitpos != nbits)
    {
      mp_ptr r2p = rp + rbitpos / GMP_NUMB_BITS;
      int last_nbits = nbits - rbitpos;
      tn = BITS_TO_LIMBS (last_nbits);
      lc (tp, rstate);
      if (rbitpos % GMP_NUMB_BITS != 0)
        {
          mp_limb_t savelimb, rcy;
          /* Target of new chunk is not bit aligned.  Use temp space
             and align things by shifting it up.  */
          savelimb = r2p[0];
          rcy = mpn_lshift (r2p, tp, tn, rbitpos % GMP_NUMB_BITS);
          r2p[0] |= savelimb;
          if (rbitpos + tn * GMP_NUMB_BITS - rbitpos % GMP_NUMB_BITS < nbits)
            r2p[tn] = rcy;
        }
      else
        {
          MPN_COPY (r2p, tp, tn);
        }
      /* Mask off top bits if needed.  */
      if (nbits % GMP_NUMB_BITS != 0)
        rp[nbits / GMP_NUMB_BITS]
          &= ~(~CNST_LIMB (0) << nbits % GMP_NUMB_BITS);
    }

  TMP_FREE;
}


static void
randseed_lc (gmp_randstate_t rstate, mpz_srcptr seed)
{
  gmp_rand_lc_struct *p = (gmp_rand_lc_struct *) RNG_STATE (rstate);
  mpz_ptr seedz = p->_mp_seed;
  mp_size_t seedn = BITS_TO_LIMBS (p->_mp_m2exp);

  /* Store p->_mp_seed as an unnormalized integer with size enough
     for numbers up to 2^m2exp-1.  That size can't be zero.  */
  mpz_fdiv_r_2exp (seedz, seed, p->_mp_m2exp);
  MPN_ZERO (&PTR (seedz)[SIZ (seedz)], seedn - SIZ (seedz));
  SIZ (seedz) = seedn;
}


static void
randclear_lc (gmp_randstate_t rstate)
{
  gmp_rand_lc_struct *p = (gmp_rand_lc_struct *) RNG_STATE (rstate);

  mpz_clear (p->_mp_seed);
  mpz_clear (p->_mp_a);
  (*__gmp_free_func) (p, sizeof (gmp_rand_lc_struct));
}

static void randiset_lc (gmp_randstate_ptr, gmp_randstate_srcptr);

static const gmp_randfnptr_t Linear_Congruential_Generator = {
  randseed_lc,
  randget_lc,
  randclear_lc,
  randiset_lc
};

static void
randiset_lc (gmp_randstate_ptr dst, gmp_randstate_srcptr src)
{
  gmp_rand_lc_struct *dstp, *srcp;

  srcp = (gmp_rand_lc_struct *) RNG_STATE (src);
  dstp = (gmp_rand_lc_struct *) (*__gmp_allocate_func) (sizeof (gmp_rand_lc_struct));

  RNG_STATE (dst) = (mp_limb_t *) (void *) dstp;
  RNG_FNPTR (dst) = (void *) &Linear_Congruential_Generator;

  /* _mp_seed and _mp_a might be unnormalized (high zero limbs), but
     mpz_init_set won't worry about that.  */
  mpz_init_set (dstp->_mp_seed, srcp->_mp_seed);
  mpz_init_set (dstp->_mp_a, srcp->_mp_a);

  dstp->_cn = srcp->_cn;

  dstp->_cp[0] = srcp->_cp[0];
  if (LIMBS_PER_ULONG > 1)
    dstp->_cp[1] = srcp->_cp[1];
  if (LIMBS_PER_ULONG > 2)  /* usually there's only 1 or 2 */
    MPN_COPY (dstp->_cp + 2, srcp->_cp + 2, LIMBS_PER_ULONG - 2);

  dstp->_mp_m2exp = srcp->_mp_m2exp;
}


void
gmp_randinit_lc_2exp (gmp_randstate_t rstate,
                      mpz_srcptr a,
                      unsigned long int c,
                      mp_bitcnt_t m2exp)
{
  gmp_rand_lc_struct *p;
  mp_size_t seedn = BITS_TO_LIMBS (m2exp);

  ASSERT_ALWAYS (m2exp != 0);

  p = __GMP_ALLOCATE_FUNC_TYPE (1, gmp_rand_lc_struct);
  RNG_STATE (rstate) = (mp_limb_t *) (void *) p;
  RNG_FNPTR (rstate) = (void *) &Linear_Congruential_Generator;

  /* Allocate m2exp bits of space for p->_mp_seed, and set the initial
     seed to 1.  */
  mpz_init2 (p->_mp_seed, m2exp);
  MPN_ZERO (PTR (p->_mp_seed), seedn);
  SIZ (p->_mp_seed) = seedn;
  PTR (p->_mp_seed)[0] = 1;

  /* "a", reduced to the range 0 to 2^m2exp-1 */
  mpz_init (p->_mp_a);
  mpz_fdiv_r_2exp (p->_mp_a, a, m2exp);

  /* Avoid SIZ(a) == 0 to avoid checking for special case in lc().  */
  if (SIZ (p->_mp_a) == 0)
    {
      SIZ (p->_mp_a) = 1;
      MPZ_NEWALLOC (p->_mp_a, 1)[0] = CNST_LIMB (0);
    }

  MPN_SET_UI (p->_cp, p->_cn, c);

  /* Internally we may discard any bits of c above m2exp.  The following
     code ensures that __GMPN_ADD in lc() will always work.  */
  if (seedn < p->_cn)
    p->_cn = (p->_cp[0] != 0);

  p->_mp_m2exp = m2exp;
}
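
/* Example use from application code through the public interface (an
   illustrative sketch only; the multiplier, addend, modulus and seed below
   are arbitrary values chosen for the example, not recommended
   parameters):

     gmp_randstate_t st;
     mpz_t a, r;

     mpz_init_set_ui (a, 0xfbd19487UL);      // hypothetical multiplier
     mpz_init (r);
     gmp_randinit_lc_2exp (st, a, 1UL, 32);  // c = 1, modulus 2^32
     gmp_randseed_ui (st, 123456UL);
     mpz_urandomb (r, st, 64);               // 64 random bits in r
     gmp_randclear (st);
     mpz_clear (a);
     mpz_clear (r);
*/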