/*
 * mirror of https://github.com/biergaizi/codecrypt, synced 2024-06-29 18:33:10 +00:00
 * codecrypt/src/mce_qcmdpc.cpp (279 lines, 6.8 KiB, C++)
 */
/*
* This file is part of Codecrypt.
*
* Codecrypt is free software: you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or (at
* your option) any later version.
*
* Codecrypt is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Codecrypt. If not, see <http://www.gnu.org/licenses/>.
*/
#include "mce_qcmdpc.h"
#include "fft.h"
#include <cmath>
// history: 2015-11-07 18:35:04 +00:00
using namespace mce_qcmdpc;
using namespace std;
#include "iohelpers.h"
#include "ios.h"
// history: 2015-11-07 18:35:04 +00:00
/*
 * Generate a QC-MDPC key pair.
 *
 * priv.H receives block_count sparse circulant blocks (each stored as its
 * first row, a GF(2) polynomial of weight wi); pub.G receives the
 * block_count-1 blocks of the systematic public matrix, i.e.
 * inv(H[last]) * H[i].  Returns 0 on success, 1 on bad parameters.
 */
int mce_qcmdpc::generate (pubkey&pub, privkey&priv, prng&rng,
                          uint block_size, uint block_count, uint wi,
                          uint t, uint rounds, uint delta)
{
	if (wi > block_size / 2) return 1; //safety bound on row weight

	priv.H.resize (block_count);
	pub.G.resize (block_count - 1);

	/*
	 * Cyclic matrices are diagonalizable by FFT so this stuff gets pretty
	 * fast. Otherwise they behave like simple polynomials over GF(2) mod
	 * (1+x^n).
	 */
	vector<dcx> H_last_inv; //FFT image of inv(H[last])

	//retry generating the rightmost block until it comes out invertible
	bool have_last = false;
	while (!have_last) {
		bvector Hb;
		Hb.resize (block_size, 0);

		//place wi distinct one-bits at random positions
		uint remaining = wi;
		while (remaining) {
			uint pos = rng.random (block_size);
			if (!Hb[pos]) {
				Hb[pos] = 1;
				--remaining;
			}
		}

		bvector modulus, Hb_inv, ignored;
		modulus.resize (block_size + 1, 0);
		modulus[0] = 1;
		modulus[block_size] = 1; //poly (x^n-1) in gf(2)

		/*
		 * TODO This is quadratic, speed it up.
		 *
		 * No one actually cares about keygen speed yet, but this can
		 * be done in O(n*log(n)) using Schönhage-Strassen algorithm.
		 * If speed is required (e.g. for SPF in some ssl replacement,
		 * *wink* *wink*), use libNTL's GF2X.
		 *
		 * NTL one uses simpler Karatsuba with ~O(n^1.58) which should
		 * (according to wikipedia) be faster for sizes under 32k bits
		 * because of constant factors involved.
		 */
		bvector gcd = Hb.ext_gcd (modulus, Hb_inv, ignored);
		if (!gcd.one()) continue; //not invertible, retry
		if (Hb_inv.size() > block_size) continue; //totally weird.
		Hb_inv.resize (block_size, 0); //pad polynomial with zeros

		//invertible: keep the block and precompute its inverse's FFT
		priv.H[block_count - 1] = Hb;
		fft (Hb_inv, H_last_inv);
		have_last = true;
	}

	//generate the rest of the blocks, filling G right away
	for (uint i = 0; i < block_count - 1; ++i) {
		bvector Hb;
		Hb.resize (block_size, 0);

		//polynomial corresponding to the first row, weight wi
		uint remaining = wi;
		while (remaining) {
			uint pos = rng.random (block_size);
			if (!Hb[pos]) {
				Hb[pos] = 1;
				--remaining;
			}
		}

		priv.H[i] = Hb;

		//G[i] = inv(H[last]) * H[i], multiplied pointwise in FFT image
		vector<dcx> Hd;
		fft (Hb, Hd);
		for (uint j = 0; j < block_size; ++j)
			Hd[j] *= H_last_inv[j];
		fft (Hd, Hb);

		pub.G[i] = Hb;
		pub.G[i].resize (block_size, 0);
	}

	//store the target parameters
	pub.t = priv.t = t;
	priv.rounds = rounds;
	priv.delta = delta;
	return 0;
}
/*
 * QC-MDPC private keys need no precomputation; always succeeds.
 */
int privkey::prepare()
{
	return 0;
}
int pubkey::encrypt (const bvector& in, bvector&out, prng&rng)
{
uint s = cipher_size();
if (t > s) return 1;
//create the error vector
bvector e;
e.resize (s);
for (uint i = 0; i < t; ++i)
for (uint pos = rng.random (s);
e[pos] ? 1 : (e[pos] = 1, 0);
pos = rng.random (s));
return encrypt (in, out, e);
}
int pubkey::encrypt (const bvector&in, bvector&out, const bvector&errors)
{
uint ps = plain_size();
if (in.size() != ps) return 1;
uint bs = G[0].size();
uint blocks = G.size();
for (uint i = 1; i < blocks; ++i)
if (G[i].size() != bs) return 1; //prevent mangled keys
2015-11-07 18:35:04 +00:00
//first, the checksum part
vector<dcx> bcheck, Pd, Gd;
bcheck.resize (bs, dcx (0, 0)); //initially zero
bvector block;
/*
* G stores first row(s) of the circulant matrix blocks. Proceed block
* by block and construct the checksum.
*
* On a side note, it would be cool to store the G already pre-FFT'd,
* but the performance gain wouldn't be interesting enough to
* compensate for 128 times larger public key (each bit would get
* expanded to two doubles). Do it if you want to encrypt bulk data.
*/
2015-11-07 18:35:04 +00:00
for (size_t i = 0; i < blocks; ++i) {
in.get_block (i * bs, bs, block);
fft (block, Pd);
fft (G[i], Gd);
for (size_t j = 0; j < bs; ++j)
bcheck[j] += Pd[j] * Gd[j];
}
2015-11-07 18:35:04 +00:00
//compute the ciphertext
out = in;
fft (bcheck, block); //get the checksum part
out.append (block);
2015-11-07 18:35:04 +00:00
out.add (errors);
return 0;
}
int privkey::decrypt (const bvector & in, bvector & out)
{
bvector tmp_errors;
return decrypt (in, out, tmp_errors);
}
#include <vector>
int privkey::decrypt (const bvector & in_orig, bvector & out, bvector & errors)
{
uint i, j;
2015-11-07 18:35:04 +00:00
uint cs = cipher_size();
if (in_orig.size() != cs) return 1;
uint bs = H[0].size();
uint blocks = H.size();
for (i = 1; i < blocks; ++i) if (H[i].size() != bs) return 2;
bvector in = in_orig; //we will modify this.
2015-11-07 18:35:04 +00:00
/*
* probabilistic decoding!
*/
vector<dcx> synd_diag, tmp, Htmp;
synd_diag.resize (bs, dcx (0, 0));
//precompute the syndrome
for (i = 0; i < blocks; ++i) {
bvector b;
b.resize (bs, 0);
b.add_offset (in, bs * i, 0, bs);
fft (b, tmp);
fft (H[i], Htmp);
for (j = 0; j < bs; ++j) synd_diag[j] += Htmp[j] * tmp[j];
}
2015-11-07 18:35:04 +00:00
bvector (syndrome);
fft (synd_diag, syndrome);
2015-11-07 18:35:04 +00:00
vector<unsigned> unsat;
unsat.resize (cs, 0);
2015-11-07 18:35:04 +00:00
for (i = 0; i < rounds; ++i) {
/*
* count the correlations, abuse the sparsity of matrices.
*
* TODO this is the slowest part of the whole thing. It's all
* probabilistic, maybe there could be some potential to speed
* it up by discarding some (already missing) precision.
*/
for (j = 0; j < cs; ++j) unsat[j] = 0;
for (uint Hi = 0; Hi < cs; ++Hi)
if (H[Hi / bs][Hi % bs]) {
uint blk = Hi / bs;
for (j = 0; j < bs; ++j)
if (syndrome[j])
++unsat[blk * bs +
(j + cs - Hi) % bs];
}
uint max_unsat = 0;
for (j = 0; j < cs; ++j)
if (unsat[j] > max_unsat) max_unsat = unsat[j];
2015-11-07 18:35:04 +00:00
if (!max_unsat) break;
//TODO what about timing attacks? :]
2015-11-07 18:35:04 +00:00
uint threshold = 0;
if (max_unsat > delta) threshold = max_unsat - delta;
//TODO also timing (but it gets pretty statistically hard here I guess)
for (uint bit = 0; bit < cs; ++bit)
if (unsat[bit] > threshold) {
2015-11-07 18:35:04 +00:00
in[bit] = !in[bit];
syndrome.rot_add (H[bit / bs], bit % bs);
2015-11-07 18:35:04 +00:00
}
}
if (i == rounds) return 2; //we simply failed
errors = in_orig;
errors.add (in); //get the difference
out = in;
out.resize (plain_size());
return 0;
}