Remove pyecc tests (#300)

* Remove pyecc tests

* Remove pyecc tests in CI too
Justin Traglia 2023-05-10 04:05:55 -05:00 committed by GitHub
parent aa8d85dbbb
commit d0201ca2c4
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 2 additions and 596 deletions

File: CI workflow for the Python bindings (test job)

@@ -38,7 +38,3 @@ jobs:
         run: |
           cd bindings/python
           make test
-      - name: ECC test
-        run: |
-          cd bindings/python
-          make ecc_test

File: bindings/python/Makefile

@@ -1,5 +1,5 @@
 .PHONY: all
-all: install test ecc_test
+all: install test
 
 ../../src/c_kzg_4844.o:
 	make -C../../src c_kzg_4844.o
@@ -11,7 +11,3 @@ install: setup.py ckzg.c ../../src/c_kzg_4844.o
 .PHONY: test
 test: tests.py
 	python3 $<
-
-.PHONY: ecc_test
-ecc_test: py_ecc_tests.py
-	python3 $<

File: bindings/python/README.md

@@ -4,10 +4,9 @@ This directory contains Python bindings for the C-KZG-4844 library.
 
 ## Prerequisites
 
-These bindings require `python3` and `py-ecc`.
+These bindings require `python3` and `PyYAML`.
 
 ```
 sudo apt install python3 python3-pip
-python3 -m pip install py-ecc
 python3 -m pip install PyYAML
 ```
@@ -22,6 +21,4 @@ You should expect to see these messages at the bottom:
 ```
 python3 tests.py
 tests passed
-python3 py_ecc_tests.py
-comparison to py_ecc passed
 ```

File: bindings/python/fft.py (deleted)

@@ -1,120 +0,0 @@
from py_ecc import optimized_bls12_381 as b
def _simple_ft(vals, modulus, roots_of_unity):
L = len(roots_of_unity)
o = []
for i in range(L):
last = b.Z1 if type(vals[0]) == tuple else 0
for j in range(L):
if type(vals[0]) == tuple:
last = b.add(last, b.multiply(vals[j], roots_of_unity[(i*j)%L]))
else:
last += vals[j] * roots_of_unity[(i*j)%L]
o.append(last if type(last) == tuple else last % modulus)
return o
def _fft(vals, modulus, roots_of_unity):
if len(vals) <= 4 and type(vals[0]) != tuple:
#return vals
return _simple_ft(vals, modulus, roots_of_unity)
elif len(vals) == 1 and type(vals[0]) == tuple:
return vals
L = _fft(vals[::2], modulus, roots_of_unity[::2])
R = _fft(vals[1::2], modulus, roots_of_unity[::2])
o = [0 for i in vals]
for i, (x, y) in enumerate(zip(L, R)):
y_times_root = b.multiply(y, roots_of_unity[i]) if type(y) == tuple else y*roots_of_unity[i]
o[i] = b.add(x, y_times_root) if type(x) == tuple else (x+y_times_root) % modulus
o[i+len(L)] = b.add(x, b.neg(y_times_root)) if type(x) == tuple else (x-y_times_root) % modulus
return o
def expand_root_of_unity(root_of_unity, modulus):
# Build up roots of unity
rootz = [1, root_of_unity]
while rootz[-1] != 1:
rootz.append((rootz[-1] * root_of_unity) % modulus)
return rootz
def fft(vals, modulus, root_of_unity, inv=False):
rootz = expand_root_of_unity(root_of_unity, modulus)
# Fill in vals with zeroes if needed
if len(rootz) > len(vals) + 1:
vals = vals + [0] * (len(rootz) - len(vals) - 1)
if inv:
# Inverse FFT
invlen = pow(len(vals), modulus-2, modulus)
if type(vals[0]) == tuple:
return [b.multiply(x, invlen) for x in
_fft(vals, modulus, rootz[:0:-1])]
else:
return [(x*invlen) % modulus for x in
_fft(vals, modulus, rootz[:0:-1])]
else:
# Regular FFT
return _fft(vals, modulus, rootz[:-1])
# Evaluates f(x) for f in evaluation form
def inv_fft_at_point(vals, modulus, root_of_unity, x):
if len(vals) == 1:
return vals[0]
# 1/2 in the field
half = (modulus + 1)//2
# 1/w
inv_root = pow(root_of_unity, len(vals)-1, modulus)
# f(-x) in evaluation form
f_of_minus_x_vals = vals[len(vals)//2:] + vals[:len(vals)//2]
# e(x) = (f(x) + f(-x)) / 2 in evaluation form
evens = [(f+g) * half % modulus for f,g in zip(vals, f_of_minus_x_vals)]
# o(x) = (f(x) - f(-x)) / 2 in evaluation form
odds = [(f-g) * half % modulus for f,g in zip(vals, f_of_minus_x_vals)]
# e(x^2) + coordinate * x * o(x^2) in evaluation form
comb = [(o * x * inv_root**i + e) % modulus for i, (o, e) in enumerate(zip(odds, evens))]
return inv_fft_at_point(comb[:len(comb)//2], modulus, root_of_unity ** 2 % modulus, x**2 % modulus)
def shift_domain(vals, modulus, root_of_unity, factor):
if len(vals) == 1:
return vals
# 1/2 in the field
half = (modulus + 1)//2
# 1/w
inv_factor = pow(factor, modulus - 2, modulus)
half_length = len(vals)//2
# f(-x) in evaluation form
f_of_minus_x_vals = vals[half_length:] + vals[:half_length]
# e(x) = (f(x) + f(-x)) / 2 in evaluation form
evens = [(f+g) * half % modulus for f,g in zip(vals, f_of_minus_x_vals)]
print('e', evens)
# o(x) = (f(x) - f(-x)) / 2 in evaluation form
odds = [(f-g) * half % modulus for f,g in zip(vals, f_of_minus_x_vals)]
print('o', odds)
shifted_evens = shift_domain(evens[:half_length], modulus, root_of_unity ** 2 % modulus, factor ** 2 % modulus)
print('se', shifted_evens)
shifted_odds = shift_domain(odds[:half_length], modulus, root_of_unity ** 2 % modulus, factor ** 2 % modulus)
print('so', shifted_odds)
return (
[(e + inv_factor * o) % modulus for e, o in zip(shifted_evens, shifted_odds)] +
[(e - inv_factor * o) % modulus for e, o in zip(shifted_evens, shifted_odds)]
)
def shift_poly(poly, modulus, factor):
factor_power = 1
inv_factor = pow(factor, modulus - 2, modulus)
o = []
for p in poly:
o.append(p * factor_power % modulus)
factor_power = factor_power * inv_factor % modulus
return o
def mul_polys(a, b, modulus, root_of_unity):
rootz = [1, root_of_unity]
while rootz[-1] != 1:
rootz.append((rootz[-1] * root_of_unity) % modulus)
if len(rootz) > len(a) + 1:
a = a + [0] * (len(rootz) - len(a) - 1)
if len(rootz) > len(b) + 1:
b = b + [0] * (len(rootz) - len(b) - 1)
x1 = _fft(a, modulus, rootz[:-1])
x2 = _fft(b, modulus, rootz[:-1])
return _fft([(v1*v2)%modulus for v1,v2 in zip(x1,x2)],
modulus, rootz[:0:-1])
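
For reference, the removed fft.py evaluated a polynomial at the powers of a root of unity and inverted that transform. Below is a minimal, self-contained sketch of that round trip over the BLS12-381 scalar field, written as a naive O(n^2) DFT rather than the recursive FFT above; MODULUS and PRIMITIVE_ROOT are the constants defined in kzg_proofs.py, and the helper names are illustrative only.

```
# Naive O(n^2) DFT/inverse-DFT round trip over the BLS12-381 scalar field,
# illustrating the transform the recursive fft() above implemented.
MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513
PRIMITIVE_ROOT = 7  # generator of the multiplicative group, as in kzg_proofs.py

def root_of_unity(order):
    assert (MODULUS - 1) % order == 0
    return pow(PRIMITIVE_ROOT, (MODULUS - 1) // order, MODULUS)

def dft(coeffs, w):
    # evals[i] = sum_j coeffs[j] * w^(i*j) mod MODULUS
    n = len(coeffs)
    return [sum(c * pow(w, i * j, MODULUS) for j, c in enumerate(coeffs)) % MODULUS
            for i in range(n)]

def idft(evals, w):
    # Inverse transform: use w^-1 and scale by n^-1 (Fermat inverses; MODULUS is prime)
    n = len(evals)
    w_inv = pow(w, MODULUS - 2, MODULUS)
    n_inv = pow(n, MODULUS - 2, MODULUS)
    return [v * n_inv % MODULUS for v in dft(evals, w_inv)]

coeffs = [1, 2, 3, 4, 5, 6, 7, 8]
w = root_of_unity(len(coeffs))
assert idft(dft(coeffs, w), w) == coeffs
print("DFT round trip passed")
```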

File: bindings/python/kzg_proofs.py (deleted)

@@ -1,248 +0,0 @@
from py_ecc import optimized_bls12_381 as b
from fft import fft
from multicombs import lincomb
# Generator for the field
PRIMITIVE_ROOT = 7
MODULUS = b.curve_order
assert pow(PRIMITIVE_ROOT, (MODULUS - 1) // 2, MODULUS) != 1
assert pow(PRIMITIVE_ROOT, MODULUS - 1, MODULUS) == 1
#########################################################################################
#
# Helpers
#
#########################################################################################
def is_power_of_two(x):
return x > 0 and x & (x-1) == 0
def generate_setup(s, size):
"""
# Generate trusted setup, in coefficient form.
# For data availability we always need to compute the polynomials anyway, so it makes little sense to do things in Lagrange space
"""
return (
[b.multiply(b.G1, pow(s, i, MODULUS)) for i in range(size + 1)],
[b.multiply(b.G2, pow(s, i, MODULUS)) for i in range(size + 1)],
)
#########################################################################################
#
# Field operations
#
#########################################################################################
def get_root_of_unity(order):
"""
Returns a root of unity of order "order"
"""
assert (MODULUS - 1) % order == 0
return pow(PRIMITIVE_ROOT, (MODULUS - 1) // order, MODULUS)
def inv(a):
"""
Modular inverse using eGCD algorithm
"""
if a == 0:
return 0
lm, hm = 1, 0
low, high = a % MODULUS, MODULUS
while low > 1:
r = high // low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % MODULUS
def div(x, y):
return x * inv(y) % MODULUS
#########################################################################################
#
# Polynomial operations
#
#########################################################################################
def eval_poly_at(p, x):
"""
Evaluate polynomial p (coefficient form) at point x
"""
y = 0
power_of_x = 1
for i, p_coeff in enumerate(p):
y += power_of_x * p_coeff
power_of_x = (power_of_x * x) % MODULUS
return y % MODULUS
def div_polys(a, b):
"""
Long polynomial division for two polynomials in coefficient form
"""
a = [x for x in a]
o = []
apos = len(a) - 1
bpos = len(b) - 1
diff = apos - bpos
while diff >= 0:
quot = div(a[apos], b[bpos])
o.insert(0, quot)
for i in range(bpos, -1, -1):
a[diff + i] -= b[i] * quot
apos -= 1
diff -= 1
return [x % MODULUS for x in o]
#########################################################################################
#
# Utils for reverse bit order
#
#########################################################################################
def reverse_bit_order(n, order):
"""
Reverse the bit order of an integer n
"""
assert is_power_of_two(order)
# Convert n to binary with the same number of bits as "order" - 1, then reverse its bit order
return int(('{:0' + str(order.bit_length() - 1) + 'b}').format(n)[::-1], 2)
def list_to_reverse_bit_order(l):
"""
Convert a list between normal and reverse bit order. This operation is idempotent.
"""
return [l[reverse_bit_order(i, len(l))] for i in range(len(l))]
#########################################################################################
#
# Converting between polynomials (in coefficient form) and data (in reverse bit order)
# and extending data
#
#########################################################################################
def get_polynomial(data):
"""
Interpolate a polynomial (coefficients) from data in reverse bit order
"""
assert is_power_of_two(len(data))
root_of_unity = get_root_of_unity(len(data))
return fft(list_to_reverse_bit_order(data), MODULUS, root_of_unity, True)
def get_data(polynomial):
"""
Get data (in reverse bit order) from polynomial in coefficient form
"""
assert is_power_of_two(len(polynomial))
root_of_unity = get_root_of_unity(len(polynomial))
return list_to_reverse_bit_order(fft(polynomial, MODULUS, root_of_unity, False))
def get_extended_data(polynomial):
"""
Get extended data (expanded by 2x, reverse bit order) from polynomial in coefficient form
"""
assert is_power_of_two(len(polynomial))
extended_polynomial = polynomial + [0] * len(polynomial)
root_of_unity = get_root_of_unity(len(extended_polynomial))
return list_to_reverse_bit_order(fft(extended_polynomial, MODULUS, root_of_unity, False))
#########################################################################################
#
# Kate single proofs
#
#########################################################################################
def commit_to_poly(polynomial, setup):
"""
Kate commitment to polynomial in coefficient form
"""
return lincomb(setup[0][:len(polynomial)], polynomial, b.add, b.Z1)
def compute_proof_single(polynomial, x, setup):
"""
Compute Kate proof for polynomial in coefficient form at position x
"""
quotient_polynomial = div_polys(polynomial, [-x, 1])
return lincomb(setup[0][:len(quotient_polynomial)], quotient_polynomial, b.add, b.Z1)
def check_proof_single(commitment, proof, x, y, setup):
"""
Check a proof for a Kate commitment for an evaluation f(x) = y
"""
# Verify the pairing equation
#
# e([commitment - y], [1]) = e([proof], [s - x])
# equivalent to
# e([commitment - y]^(-1), [1]) * e([proof], [s - x]) = 1_T
#
s_minus_x = b.add(setup[1][1], b.multiply(b.neg(b.G2), x))
commitment_minus_y = b.add(commitment, b.multiply(b.neg(b.G1), y))
pairing_check = b.pairing(b.G2, b.neg(commitment_minus_y), False)
pairing_check *= b.pairing(s_minus_x, proof, False)
pairing = b.final_exponentiate(pairing_check)
return pairing == b.FQ12.one()
#########################################################################################
#
# Kate multiproofs on a coset
#
#########################################################################################
def compute_proof_multi(polynomial, x, n, setup):
"""
Compute Kate proof for polynomial in coefficient form at positions x * w^y where w is
an n-th root of unity (this is the proof for one data availability sample, which consists
of several polynomial evaluations)
"""
quotient_polynomial = div_polys(polynomial, [-pow(x, n, MODULUS)] + [0] * (n - 1) + [1])
return lincomb(setup[0][:len(quotient_polynomial)], quotient_polynomial, b.add, b.Z1)
def check_proof_multi(commitment, proof, x, ys, setup):
"""
Check a proof for a Kate commitment for an evaluation f(x w^i) = y_i
"""
n = len(ys)
root_of_unity = get_root_of_unity(n)
# Interpolate at a coset. Note because it is a coset, not the subgroup, we have to multiply the
# polynomial coefficients by x^i
interpolation_polynomial = fft(ys, MODULUS, root_of_unity, True)
interpolation_polynomial = [div(c, pow(x, i, MODULUS)) for i, c in enumerate(interpolation_polynomial)]
# Verify the pairing equation
#
# e([commitment - interpolation_polynomial(s)], [1]) = e([proof], [s^n - x^n])
# equivalent to
# e([commitment - interpolation_polynomial]^(-1), [1]) * e([proof], [s^n - x^n]) = 1_T
#
xn_minus_yn = b.add(setup[1][n], b.multiply(b.neg(b.G2), pow(x, n, MODULUS)))
commitment_minus_interpolation = b.add(commitment, b.neg(lincomb(
setup[0][:len(interpolation_polynomial)], interpolation_polynomial, b.add, b.Z1)))
pairing_check = b.pairing(b.G2, b.neg(commitment_minus_interpolation), False)
pairing_check *= b.pairing(xn_minus_yn, proof, False)
pairing = b.final_exponentiate(pairing_check)
return pairing == b.FQ12.one()
if __name__ == "__main__":
polynomial = [1, 2, 3, 4, 7, 7, 7, 7, 13, 13, 13, 13, 13, 13, 13, 13]
n = len(polynomial)
setup = generate_setup(1927409816240961209460912649124, n)
commitment = commit_to_poly(polynomial, setup)
proof = compute_proof_single(polynomial, 17, setup)
value = eval_poly_at(polynomial, 17)
assert check_proof_single(commitment, proof, 17, value, setup)
print("Single point check passed")
root_of_unity = get_root_of_unity(8)
x = 5431
coset = [x * pow(root_of_unity, i, MODULUS) for i in range(8)]
ys = [eval_poly_at(polynomial, z) for z in coset]
proof = compute_proof_multi(polynomial, x, 8, setup)
assert check_proof_multi(commitment, proof, x, ys, setup)
print("Coset check passed")

File: bindings/python/multicombs.py (deleted)

@@ -1,131 +0,0 @@
import random, time, sys, math
# For each subset in `subsets` (provided as a list of indices into `numbers`),
# compute the sum of that subset of `numbers`. More efficient than the naive method.
def multisubset(numbers, subsets, adder=lambda x,y: x+y, zero=0):
numbers = numbers[::]
subsets = {i: {x for x in subset} for i, subset in enumerate(subsets)}
output = [zero for _ in range(len(subsets))]
for roundcount in range(9999999):
# Compute counts of every pair of indices in the subset list
pair_count = {}
for index, subset in subsets.items():
for x in subset:
for y in subset:
if y > x:
pair_count[(x, y)] = pair_count.get((x, y), 0) + 1
# Determine pairs with highest count. The cutoff parameter [:len(numbers)]
# determines a tradeoff between group operation count and other forms of overhead
pairs_by_count = sorted([el for el in pair_count.keys()], key=lambda el: pair_count[el], reverse=True)[:len(numbers)*int(math.log(len(numbers)))]
# Exit condition: all subsets have size 1, no pairs
if len(pairs_by_count) == 0:
for key, subset in subsets.items():
for index in subset:
output[key] = adder(output[key], numbers[index])
return output
# In each of the highest-count pairs, take the sum of the numbers at those indices,
# and add the result as a new value, and modify `subsets` to include the new value
# wherever possible
used = set()
for maxx, maxy in pairs_by_count:
if maxx in used or maxy in used:
continue
used.add(maxx)
used.add(maxy)
numbers.append(adder(numbers[maxx], numbers[maxy]))
for key, subset in list(subsets.items()):
if maxx in subset and maxy in subset:
subset.remove(maxx)
subset.remove(maxy)
if not subset:
output[key] = numbers[-1]
del subsets[key]
else:
subset.add(len(numbers)-1)
# Alternative algorithm. Less optimal than the above, but much lower bit twiddling
# overhead and much simpler.
def multisubset2(numbers, subsets, adder=lambda x,y: x+y, zero=0):
# Split up the numbers into partitions
partition_size = 1 + int(math.log(len(subsets) + 1))
# Align number count to partition size (for simplicity)
numbers = numbers[::]
while len(numbers) % partition_size != 0:
numbers.append(zero)
# Compute power set for each partition (eg. a, b, c -> {0, a, b, a+b, c, a+c, b+c, a+b+c})
power_sets = []
for i in range(0, len(numbers), partition_size):
new_power_set = [zero]
for dimension, value in enumerate(numbers[i:i+partition_size]):
new_power_set += [adder(n, value) for n in new_power_set]
power_sets.append(new_power_set)
# Compute subset sums, using elements from power set for each range of values
# ie. with a single power set lookup you can get the sum of _all_ elements in
# the range partition_size*k...partition_size*(k+1) that are in that subset
subset_sums = []
for subset in subsets:
o = zero
for i in range(len(power_sets)):
index_in_power_set = 0
for j in range(partition_size):
if i * partition_size + j in subset:
index_in_power_set += 2 ** j
o = adder(o, power_sets[i][index_in_power_set])
subset_sums.append(o)
return subset_sums
# Reduces a linear combination `numbers[0] * factors[0] + numbers[1] * factors[1] + ...`
# into a multi-subset problem, and computes the result efficiently
def lincomb(numbers, factors, adder=lambda x,y: x+y, zero=0):
# Maximum bit length of a number; how many subsets we need to make
maxbitlen = max((len(bin(f))-2 for f in factors), default=0)
# Compute the subsets: the ith subset contains the numbers whose corresponding factor
# has a 1 at the ith bit
subsets = [{i for i in range(len(numbers)) if factors[i] & (1 << j)} for j in range(maxbitlen+1)]
subset_sums = multisubset2(numbers, subsets, adder=adder, zero=zero)
# For example, suppose a value V has factor 6 (011 in increasing-order binary). Subset 0
# will not have V, subset 1 will, and subset 2 will. So if we multiply the output of adding
# subset 0 with twice the output of adding subset 1, with four times the output of adding
# subset 2, then V will be represented 0 + 2 + 4 = 6 times. This reasoning applies for every
# value. So `subset_0_sum + 2 * subset_1_sum + 4 * subset_2_sum` gives us the result we want.
# Here, we compute this as `((subset_2_sum * 2) + subset_1_sum) * 2 + subset_0_sum` for
# efficiency: an extra `maxbitlen * 2` group operations.
o = zero
for i in range(len(subsets)-1, -1, -1):
o = adder(adder(o, o), subset_sums[i])
return o
# Tests go here
def make_mock_adder():
counter = [0]
def adder(x, y):
if x and y:
counter[0] += 1
return x+y
return adder, counter
def test_multisubset(numcount, setcount):
numbers = [random.randrange(10**20) for _ in range(numcount)]
subsets = [{i for i in range(numcount) if random.randrange(2)} for i in range(setcount)]
adder, counter = make_mock_adder()
o = multisubset(numbers, subsets, adder=adder)
for output, subset in zip(o, subsets):
assert output == sum([numbers[x] for x in subset])
def test_lincomb(numcount, bitlength=256):
numbers = [random.randrange(10**20) for _ in range(numcount)]
factors = [random.randrange(2**bitlength) for _ in range(numcount)]
adder, counter = make_mock_adder()
o = lincomb(numbers, factors, adder=adder)
assert o == sum([n*f for n,f in zip(numbers, factors)])
total_ones = sum(bin(f).count('1') for f in factors)
print("Naive operation count: %d" % (bitlength * numcount + total_ones))
print("Optimized operation count: %d" % (bitlength * 2 + counter[0]))
print("Optimization factor: %.2f" % ((bitlength * numcount + total_ones) / (bitlength * 2 + counter[0])))
if __name__ == '__main__':
test_lincomb(int(sys.argv[1]) if len(sys.argv) >= 2 else 80)
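
The core idea in lincomb above is to decompose the factors into bits, take one subset sum per bit position, and reassemble the result with repeated doubling, so the group-operation count depends on the bit length rather than on the factor values. A tiny self-contained illustration with plain integers standing in for group elements (lincomb_bits is a hypothetical name, not part of the module):

```
import random

def lincomb_bits(numbers, factors):
    # subset_sums[j] = sum of numbers[i] whose factor has bit j set
    maxbitlen = max(f.bit_length() for f in factors)
    subset_sums = [sum(n for n, f in zip(numbers, factors) if (f >> j) & 1)
                   for j in range(maxbitlen)]
    # Horner-style reassembly: ((s_{k-1} * 2 + s_{k-2}) * 2 + ...) * 2 + s_0
    o = 0
    for s in reversed(subset_sums):
        o = o + o + s  # "doubling" is just addition, the only group operation used
    return o

numbers = [random.randrange(10**6) for _ in range(5)]
factors = [random.randrange(1, 2**16) for _ in range(5)]
assert lincomb_bits(numbers, factors) == sum(n * f for n, f in zip(numbers, factors))
print("bit-decomposition lincomb matches direct computation")
```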

File: bindings/python/py_ecc_tests.py (deleted)

@@ -1,72 +0,0 @@
import ckzg
import kzg_proofs
import random
from py_ecc import optimized_bls12_381 as b
from py_ecc.bls.point_compression import compress_G1, decompress_G1, decompress_G2
polynomial = [random.randint(0, kzg_proofs.MODULUS - 1) for i in range(4096)]
n = len(polynomial)
x = 9283547894352
y = kzg_proofs.eval_poly_at(polynomial, x)
root_of_unity = kzg_proofs.get_root_of_unity(n)
roots_of_unity = [pow(root_of_unity, i, kzg_proofs.MODULUS) for i in range(n)]
polynomial_l = [kzg_proofs.eval_poly_at(polynomial, w) for w in roots_of_unity]
def evaluate_polynomial_in_evaluation_form(polynomial, z, roots_of_unity):
width = len(polynomial)
inverse_width = kzg_proofs.inv(width)
# Make sure we won't divide by zero during division
assert z not in roots_of_unity
result = 0
for i in range(width):
result += kzg_proofs.div(polynomial[i] * roots_of_unity[i], (z - roots_of_unity[i]))
result = result * (pow(z, width, kzg_proofs.MODULUS) - 1) * inverse_width % kzg_proofs.MODULUS
return result
y2 = evaluate_polynomial_in_evaluation_form(polynomial_l, x, roots_of_unity)
assert y == y2
polynomial_l_rbo = kzg_proofs.list_to_reverse_bit_order(polynomial_l)
roots_of_unity_rbo = kzg_proofs.list_to_reverse_bit_order(roots_of_unity)
y3 = evaluate_polynomial_in_evaluation_form(polynomial_l_rbo, x, roots_of_unity_rbo)
assert y == y3
ts = ckzg.load_trusted_setup("../../src/trusted_setup.txt")
def load_trusted_setup(filename):
with open(filename, "r") as f:
g1_length = int(f.readline())
g2_length = int(f.readline())
g1_setup = []
g2_setup = []
for i in range(g1_length):
g1_setup.append(decompress_G1(int(f.readline(), 16)))
#for i in range(g2_length):
# l = f.readline()
# g2_setup.append(decompress_G2((int(l[:48], 16), int(l[48:], 16))))
return [g1_setup, g2_setup]
ts_pyecc = load_trusted_setup("../../src/trusted_setup.txt")
commitment_pyecc = kzg_proofs.commit_to_poly(polynomial, ts_pyecc)
commitment_ckzg = ckzg.blob_to_kzg_commitment(b''.join([r.to_bytes(32, "little") for r in polynomial_l_rbo]), ts)
assert compress_G1(commitment_pyecc).to_bytes(48, "big") == commitment_ckzg
# TODO: update this test for the new ckzg interface
# proof_pyecc = kzg_proofs.compute_proof_single(polynomial, x, ts_pyecc)
# proof_ckzg = ckzg.compute_kzg_proof(ckzg_poly, ckzg.bytes_to_bls_field(x.to_bytes(32, "little")), ts)
#
# assert compress_G1(proof_pyecc).to_bytes(48, "big") == ckzg.bytes_from_g1(proof_ckzg)
print('comparison to py_ecc passed')
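
evaluate_polynomial_in_evaluation_form above is the barycentric formula p(z) = (z^n - 1)/n * sum_i p(w_i) * w_i / (z - w_i) over the n-th roots of unity w_i. Here is a small self-contained check of that formula against direct coefficient-form evaluation; the constants follow kzg_proofs.py and the helper names are illustrative.

```
# Standalone check of the barycentric evaluation formula used above:
#   p(z) = (z^n - 1)/n * sum_i p(w_i) * w_i / (z - w_i),  w_i the n-th roots of unity
MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513
PRIMITIVE_ROOT = 7

def inv(a):
    return pow(a, MODULUS - 2, MODULUS)

def eval_coeff_form(p, z):
    return sum(c * pow(z, i, MODULUS) for i, c in enumerate(p)) % MODULUS

def eval_evaluation_form(evals, z, roots):
    n = len(evals)
    assert z not in roots  # avoid division by zero
    total = sum(e * w % MODULUS * inv((z - w) % MODULUS) for e, w in zip(evals, roots))
    return total % MODULUS * (pow(z, n, MODULUS) - 1) % MODULUS * inv(n) % MODULUS

n = 8
w = pow(PRIMITIVE_ROOT, (MODULUS - 1) // n, MODULUS)
roots = [pow(w, i, MODULUS) for i in range(n)]
poly = [1, 2, 3, 4, 7, 7, 7, 7]
evals = [eval_coeff_form(poly, r) for r in roots]
z = 9283547894352
assert eval_evaluation_form(evals, z, roots) == eval_coeff_form(poly, z)
print("barycentric evaluation matches coefficient form")
```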

File: trusted setup test fixture (deleted)

@@ -1,12 +0,0 @@
8
2
a895d8f7f4f160ab3837d80b5f6aa743b28d22ed22116269f3857130709df107867ba7e0db0d5e171492fab0db2a635f
a7fb942f2b95eefd7986e08f7a59c6f45f0a1c51be0e3f6e33ad67cf8dbf1e243632d001c52f7b441447b1fd03b51a63
a76f84feba16773b55f23d2d66037e5c1a9c85647c8986a98200585ec2f9cb28158ef842fa61beb4c86a85c229692c20
b5e605081999c80ce61b53408d71a892ac1d6f1b177ab2dc7a18b03ab612d14c51bd990e5f4826f2ba4c0310e332a9d3
98e3d5a7bad4eb817838cadb18a7c2efc5d586a402f0b54b894366855018f316518eb2d0744e439ea0f40a83176eaf46
95e3d50c2686c92568f46c8aa3a0e7cb3d456cc532c4ea0d07d47dbbed678fca2976b0b28009a360a2fa3dcd2716a7dd
b7b60fc315983545150f0b6545509ffe3f38b5562574e640dd9c61d1a5db441855df9b0cab10096fbca09024fe076602
86836a32ca078c51b9e9012fd37cf6647bc4bc2b8331b8c51f0958aed321de1fe44c2ff374d34eba13fcac18f3061a63
a010bc4d0787706e474286e18d786e4d779a3434db592b7e6398b3de129af9f16251f4b929eae78509e3c0cd06caf82813fda1cc561cef36c1ca5ffe5521f29647e80b2ac0bb5ac953cbd83096627ad4c6d19c15249a7583ffa681a5b8203af9
877241b94fba746c0c9ea95c294604e5c6dfeab9e0ad7f31fc4bbb9e2610ffdaba310e5ea82404ce3b3e5618b2b336cc194eb0620c969dad9f9a681aefdaa4370bc1051c71719e551d1f1c51f4b1735bf5c706cff6612fc3c918ddb0061a208d
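
The fixture follows the text trusted-setup layout also parsed by load_trusted_setup in py_ecc_tests.py: the number of G1 points, the number of G2 points, then one hex-encoded compressed point per line (48 bytes for G1, 96 bytes for G2). A minimal reader sketch that keeps the points as raw bytes (read_trusted_setup_text is a hypothetical helper, not part of the bindings):

```
def read_trusted_setup_text(path):
    # First line: number of G1 points; second line: number of G2 points;
    # then one hex-encoded compressed point per line (48-byte G1, 96-byte G2).
    with open(path) as f:
        n_g1 = int(f.readline())
        n_g2 = int(f.readline())
        g1 = [bytes.fromhex(f.readline().strip()) for _ in range(n_g1)]
        g2 = [bytes.fromhex(f.readline().strip()) for _ in range(n_g2)]
    assert all(len(p) == 48 for p in g1) and all(len(p) == 96 for p in g2)
    return g1, g2
```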