A little explanation: I have no time at the moment to write all of the code for you. (You should write the threading in the search def.) Read carefully the changes implemented by me. import sys import io import random import math import gmpy2 from gmpy2 import mpz from functools import lru_cache from multiprocessing import Pool, cpu_count
# secp256k1 domain parameters: field prime, group order, and generator (Gx, Gy).
modulo = gmpy2.mpz(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
order = gmpy2.mpz(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141)
Gx = gmpy2.mpz(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798)
Gy = gmpy2.mpz(0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)


class Point:
    # Simple mutable affine point; Point(0, 0) stands in for the point at infinity.
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y


PG = Point(Gx, Gy)
Z = Point(0, 0)  # zero-point, infinite in real x,y-plane
def mul2(P, p=modulo):
    # Affine point doubling: slope c = 3*x^2 / (2*y) mod p.
    # NOTE(review): undefined when P.y == 0 -- assumed never to occur on these walks.
    c = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    R = Point()
    R.x = (c * c - 2 * P.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R
def add(P, Q, p=modulo):
    # Affine addition of two points with distinct x coordinates.
    # NOTE(review): gmpy2.invert raises when dx is not invertible (P == Q or
    # P == -Q); callers assume this never happens during the random walks.
    dx = Q.x - P.x
    dy = Q.y - P.y
    c = dy * gmpy2.invert(dx, p) % p
    R = Point()
    R.x = (c * c - P.x - Q.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R
@lru_cache(maxsize=None)
def X2Y(X, y_parity, p=modulo):
    # Recover the Y coordinate with the requested parity for X on y^2 = x^3 + 7.
    # First loop: square-and-multiply for X^3 mod p (exponent kept in Y, confusingly).
    Y = 3
    tmp = 1
    while Y:
        if Y & 1:
            tmp = tmp * X % p
        Y >>= 1
        X = X * X % p
    # X is reused to hold the right-hand side x^3 + 7.
    X = (tmp + 7) % p
    # Second loop: modular square root via exponent (p + 1) // 4 (valid since p % 4 == 3).
    Y = (p + 1) // 4
    tmp = 1
    while Y:
        if Y & 1:
            tmp = tmp * X % p
        Y >>= 1
        X = X * X % p
    Y = tmp
    # Pick the square root whose parity matches the compressed-key prefix.
    if Y % 2 != y_parity:
        Y = -Y % p
    return Y
def compute_P_table():
    # Precompute the kangaroo jump table P[k] = 2^k * G for k = 0..255.
    P = [PG]
    for k in range(255):
        P.append(mul2(P[k]))
    return P


P = compute_P_table()
# BUG FIX: this first version of the script never imported `os` and `time`
# (only sys/io/random/math/gmpy2 and friends), so the banner below raised
# NameError at import time.  Both are stdlib; importing them here keeps the
# edit self-contained.
import os
import time

# Start-up banner (clears the terminal as a side effect).
os.system("clear")
t = time.ctime()
sys.stdout.write("\033[01;33m")
sys.stdout.write("################################################" + "\n")
sys.stdout.write("Pollard-kangaroo PrivKey Recovery Tool multicore" + "\n")
sys.stdout.write("################################################" + "\n")
sys.stdout.write(t + "\n")
sys.stdout.write("P-table prepared" + "\n")
sys.stdout.write("tame and wild herds kangaroos is being prepared" + "\n")
sys.stdout.flush()
def comparator(A, Ak, B, Bk):
    """Test for a collision between tame and wild distinguished points.

    A/B hold x-coordinates, Ak/Bk the matching walk scalars.  On a collision
    the private key (scalar difference) is printed and appended to
    KEYFOUNDKEYFOUND.txt.

    Returns True on a collision, False otherwise.  BUG FIX: the original
    returned the intersection set on success and fell through to an implicit
    None otherwise (the intended `return True/False` was left behind as dead
    commented-out code, removed here).
    """
    result = set(A).intersection(set(B))
    if not result:
        return False
    sol_kt = A.index(next(iter(result)))
    sol_kw = B.index(next(iter(result)))
    difference = Ak[sol_kt] - Bk[sol_kw]
    HEX = "%064x" % difference  # Convert to a hexadecimal string
    t = time.ctime()
    total_time = time.time() - starttime  # NOTE(review): module-level global
    print("\033[01;33mSOLVED:", t, f"total time: {total_time:.2f} sec", HEX, "\n")
    with open("KEYFOUNDKEYFOUND.txt", "a") as file:
        file.write("\n\nSOLVED " + t)
        file.write(f"\nTotal Time: {total_time:.2f} sec")
        file.write("\nPrivate Key (decimal): " + str(difference))
        file.write("\nPrivate Key (hex): " + HEX)
        file.write(
            "\n-------------------------------------------------------------------------------------------------------------------------------------\n"
        )
    return True
# Batch writing function
def batch_write_data_to_file(data, file_path, batch_size=5000):
    """Append the strings in `data` to `file_path` in batches of `batch_size`.

    BUG FIX: the original passed buffering=1024*1024*1024, which allocates a
    1 GiB write buffer on every open; default buffering gives the same file
    contents without the per-call allocation.
    """
    with open(file_path, "a") as fp:
        for i in range(0, len(data), batch_size):
            fp.writelines(data[i : i + batch_size])
# Function to check and write data to file def check( P, Pindex, DP_rarity, file2save, A, Ak, B, Bk, buffer_size=1024 * 1024 * 1024 ): if P.x % DP_rarity == 0: A.append(P.x) Ak.append(Pindex) data_to_write = ["%064x %d\n" % (P.x, Pindex)] ## YOU DONT USE TAME AND WILD IN SCRIPT! SO FOR WHAT IMPLEMENT IT? IT TAKING TIME AND MEMORY """ batch_write_data_to_file(data_to_write, file2save) # Batch write data to file # Print the public key message = "\rPublic key: {:064x}".format(P.x) sys.stdout.write("\033[01;33m") sys.stdout.write(message + "\r") sys.stdout.flush() """ return comparator(A, Ak, B, Bk) else: return False
def save2file(path, mode, data, buffer_size=1024 * 1024 * 1024):
    """Persist `data` to `path` opened with `mode` (UTF-8 text).

    Strings and ints are written directly; for containers, each str/int
    element is written and anything else is silently skipped.  The
    `buffer_size` parameter is accepted for call-site compatibility but is
    never used.
    """
    with open(path, mode, encoding="utf-8") as fp:
        if isinstance(data, (str, int)):
            fp.write(str(data))
        elif isinstance(data, (list, tuple, dict, set)):
            for item in data:
                if isinstance(item, (str, int)):
                    fp.write(str(item))
# Memoization for ecmultiply.
# BUG FIX: the cache key must include the base point, not just the scalar k.
# The recursion calls ecmultiply(k // 2, mul2(P, p), p) with a DIFFERENT base,
# yet the old cache stored results under k alone; e.g. after ecmultiply(4)
# cached memo[2] = 4*G, a later ecmultiply(2) wrongly returned 4*G instead of 2*G.
ecmultiply_memo = {}


def ecmultiply(k, P=PG, p=modulo):
    """Return k*P via recursive double-and-add, memoized on (k, P.x, P.y)."""
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        key = (k, P.x, P.y)
        if key in ecmultiply_memo:
            return ecmultiply_memo[key]
        else:
            result = ecmultiply(k // 2, mul2(P, p), p)
            ecmultiply_memo[key] = result
            return result
    else:
        return add(P, ecmultiply((k - 1) // 2, mul2(P, p), p))
def mulk(k, P=PG, p=modulo):
    # Plain (uncached) recursive double-and-add scalar multiplication k*P.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        return mulk(k // 2, mul2(P, p), p)
    else:
        return add(P, mulk((k - 1) // 2, mul2(P, p), p))
def search(Nt, Nw, puzzle, kangoo_power, starttime):
    # Kangaroo walk for one worker: "thread one" = tame herd, "thread 2" =
    # wild herd in the author's wording.  These parameters were moved to
    # module level by the author so they are computed once, not per call:
    """
    DP_rarity = 1 << ((puzzle - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((puzzle - 1) // 2) + kangoo_power
    T, t, dt = [], [], []
    W, w, dw = [], [], []
    """
    # NOTE(review): T/t/dt etc. are module-level globals that already received
    # Nt entries at import time; this loop appends Nt more, so only the first
    # half is ever walked below -- confirm this duplication is intended.
    # NOTE(review): forked workers inherit the parent's `random` state, so all
    # processes may perform identical walks unless reseeded per process.
    for k in range(Nt):
        t.append((3 << (puzzle - 2)) + random.randint(1, (2 ** (puzzle - 1))))
        T.append(mulk(t[k]))
        dt.append(0)
    for k in range(Nw):
        w.append(random.randint(1, (1 << (puzzle - 1))))
        W.append(add(W0, mulk(w[k])))
        dw.append(0)
    # oldtime = time.time()  -- unused, removed by the author
    Hops, Hops_old = 0, 0
    # t0 = time.time()  -- unused, removed by the author
    oldtime = time.time()
    starttime = oldtime
    while True:
        # Tame herd ("THREAD ONE"): hop size is 2^(x mod hop_modulo).
        for k in range(Nt):
            Hops += 1
            pw = T[k].x % hop_modulo
            dt[k] = 1 << pw
            solved = check(T[k], t[k], DP_rarity, "tame.txt", T, t, W, w)
            if solved:
                return "sol. time: %.2f sec" % (time.time() - starttime)
            t[k] += dt[k]
            T[k] = add(P[pw], T[k])
        # Wild herd ("THREAD 2").
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            dw[k] = 1 << pw
            solved = check(W[k], w[k], DP_rarity, "wild.txt", W, w, T, t)
            if solved:
                return "sol. time: %.2f sec" % (time.time() - starttime)
            w[k] += dw[k]
            W[k] = add(P[pw], W[k])
# Data sets and shared state, hoisted out of search() so they are built once.
puzzle = 50
compressed_public_key = (
    "03f46f41027bbf44fafd6b059091b900dad41e6845b2241dc3254c7cdd3c5a16c6"  # Puzzle 50
)
kangoo_power = 10  # For Puzzle 50-56 use 9 to 11, for Puzzle 60-80 use 14 to 16 / 24 cores or above preferred
Nt = Nw = 2**kangoo_power

# Moved from the search def (computed once at import instead of per call).
DP_rarity = 1 << ((puzzle - 2 * kangoo_power) // 2 - 2)
hop_modulo = ((puzzle - 1) // 2) + kangoo_power
T, t, dt = [], [], []
W, w, dw = [], [], []

# NOTE(review): search() appends another Nt entries to these same lists, so
# they end up holding 2*Nt entries per process -- see note inside search().
for k in range(Nt):
    t.append((3 << (puzzle - 2)) + random.randint(1, (2 ** (puzzle - 1))))
    T.append(mulk(t[k]))
    dt.append(0)
# check format pubkey
if len(compressed_public_key) == 66:
    X = int(compressed_public_key[2:66], 16)
    # calculation Y from X if pubkey is compressed (prefix "02"/"03" -> parity 0/1)
    Y = X2Y(X, gmpy2.mpz(compressed_public_key[:2]) - 2)
else:
    # NOTE(review): execution continues after this error; X/Y stay undefined.
    print("[error] pubkey len(66/130) invalid!")

print(f"[Puzzle]: {puzzle}")
print("[Xcoordinate]: %064x" % X)
print("[Ycoordinate]: %064x" % Y)

W0 = Point(X, Y)
starttime = oldtime = time.time()

Hops = 0
random.seed()

hops_list = []
N_tests = kangoo_power

# Disabled by the author: the tame/wild DP files are not used by this version,
# so creating them only costs time and memory.
"""
for k in range(N_tests):
    # Create empty 'tame.txt' and 'wild.txt' files for each iteration
    save2file("tame.txt", "w", "")
    save2file("wild.txt", "w", "")
"""
def parallel_search(process_count, Nt, Nw, puzzle, kangoo_power, starttime):
    """Fan `search` out over `process_count` worker processes and collect results."""
    worker_args = [(Nt, Nw, puzzle, kangoo_power, starttime)] * process_count
    pool = Pool(process_count)
    outcome = pool.starmap(search, worker_args)
    pool.close()
    pool.join()
    return outcome
if __name__ == "__main__": process_count = cpu_count() # Use all available CPU cores print(f"Using {process_count} CPU cores for parallel search.") results = parallel_search(process_count, Nt, Nw, puzzle, kangoo_power, starttime) for result in results: print(result)
Try it now! -> In my solution I had previously implemented thread one and thread two for the search function. Do it! ################################################ Pollard-kangaroo PrivKey Recovery Tool multicore ################################################ Sun Sep 10 13:42:36 2023 P-table prepared tame and wild herds kangaroos is being prepared [Puzzle]: 50 [Xcoordinate]: f46f41027bbf44fafd6b059091b900dad41e6845b2241dc3254c7cdd3c5a16c6 [Ycoordinate]: eb3dfcc04c320b55c529291478550be6072977c0c86603fb2e4f5283631064fb Using 4 CPU cores for parallel search. SOLVED: Sun Sep 10 13:44:06 2023 total time: 89.33 sec -0000000000000000000000000000000000000000000000000022bd43c2e9354
Thanks. Here is the corrected one. The script was able to find 2**50 on my pc in 90 seconds. import sys import os import time import random import gmpy2 from gmpy2 import mpz from functools import lru_cache import multiprocessing from multiprocessing import Pool, cpu_count
# Constants modulo = gmpy2.mpz(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F) order = gmpy2.mpz(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141) Gx = gmpy2.mpz(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798) Gy = gmpy2.mpz(0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)
class Point:
    """Mutable affine point on secp256k1; Point(0, 0) denotes the point at infinity."""

    # The kangaroo walk allocates a Point per hop (millions of instances), so
    # __slots__ drops the per-instance __dict__ -- a large memory/speed win.
    __slots__ = ("x", "y")

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __repr__(self):
        # Debug-friendly representation; not relied on by any output format.
        return f"Point({self.x}, {self.y})"
PG = Point(Gx, Gy)  # the curve generator G
Z = Point(0, 0)  # zero-point, infinite in real x,y-plane
def mul2(P, p=modulo):
    """Return 2*P using the affine doubling formula (slope 3x^2 / 2y mod p).

    Undefined when P.y == 0; that case never arises in these random walks.
    """
    inv_two_y = pow(2 * P.y, -1, p)
    slope = (3 * P.x * P.x * inv_two_y) % p
    doubled = Point()
    doubled.x = (slope * slope - 2 * P.x) % p
    doubled.y = (slope * (P.x - doubled.x) - P.y) % p
    return doubled
def add(P, Q, p=modulo):
    """Return P + Q for points with distinct x coordinates.

    gmpy2.invert raises when Q.x - P.x is not invertible mod p (P == Q or
    P == -Q); callers rely on that never happening during the walks.
    """
    slope = (Q.y - P.y) * gmpy2.invert(Q.x - P.x, p) % p
    total = Point()
    total.x = (slope * slope - P.x - Q.x) % p
    total.y = (slope * (P.x - total.x) - P.y) % p
    return total
@lru_cache(maxsize=None)
def X2Y(X, y_parity, p=modulo):
    """Recover the Y coordinate with parity `y_parity` for X on y^2 = x^3 + 7.

    Uses the (p + 1) // 4 square-root exponent, valid because the secp256k1
    field prime satisfies p % 4 == 3.
    """
    # Built-in three-argument pow() replaces the two hand-rolled
    # square-and-multiply loops of the original -- identical result, C speed.
    y_squared = (pow(X, 3, p) + 7) % p
    Y = pow(y_squared, (p + 1) // 4, p)
    # Pick the root whose parity matches the compressed-key prefix.
    if Y % 2 != y_parity:
        Y = -Y % p
    return Y
def compute_P_table():
    # Precompute the kangaroo jump table P[k] = 2^k * G for k = 0..255.
    P = [PG]
    for k in range(255):
        P.append(mul2(P[k]))
    return P


P = compute_P_table()
# Global event to signal all processes to stop
stop_event = multiprocessing.Event()


def comparator(A, Ak, B, Bk):
    """Test for a tame/wild distinguished-point collision.

    On a collision, print and log the recovered key difference, set the
    global stop_event, and return True; otherwise return False.

    BUG FIX: the original never returned a value, so callers' `if solved:`
    check could never fire and workers stopped only via the stop_event side
    channel; the explicit True/False return restores the intended contract.
    """
    result = set(A).intersection(set(B))
    if not result:
        return False
    sol_kt = A.index(next(iter(result)))
    sol_kw = B.index(next(iter(result)))
    difference = Ak[sol_kt] - Bk[sol_kw]
    HEX = "%064x" % difference  # Convert to a hexadecimal string
    t = time.ctime()
    total_time = time.time() - starttime  # NOTE(review): module-level global
    print("\033[01;33mSOLVED:", t, f"total time: {total_time:.2f} sec", HEX, "\n")
    with open("KEYFOUNDKEYFOUND.txt", "a") as file:
        file.write("\n\nSOLVED " + t)
        file.write(f"\nTotal Time: {total_time:.2f} sec")
        file.write("\nPrivate Key (decimal): " + str(difference))
        file.write("\nPrivate Key (hex): " + HEX)
        file.write(
            "\n-------------------------------------------------------------------------------------------------------------------------------------\n"
        )
    stop_event.set()  # Set the stop event to signal all processes
    return True
def check(P, Pindex, DP_rarity, A, Ak, B, Bk):
    """Record P if it is a distinguished point and test for a herd collision.

    A point is "distinguished" when its x-coordinate is divisible by
    DP_rarity.  Returns comparator's verdict for distinguished points,
    False otherwise.
    """
    if P.x % DP_rarity != 0:
        return False  # ordinary point: nothing to record
    A.append(P.x)
    Ak.append(Pindex)
    # Print the public key (same escape/CR byte stream as before).
    sys.stdout.write("\033[01;33m")
    sys.stdout.write("\rPublic key: {:064x}".format(P.x) + "\r")
    sys.stdout.flush()
    return comparator(A, Ak, B, Bk)
# Memo cache for ecmultiply.
# BUG FIX: the cache key must include the base point, not just the scalar k.
# The recursion calls ecmultiply(k // 2, mul2(P, p), p) with a DIFFERENT base,
# yet the old cache stored results under k alone; e.g. after ecmultiply(4)
# cached memo[2] = 4*G, a later ecmultiply(2) wrongly returned 4*G instead of 2*G.
ecmultiply_memo = {}


def ecmultiply(k, P=PG, p=modulo):
    """Return k*P via recursive double-and-add, memoized on (k, P.x, P.y)."""
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        key = (k, P.x, P.y)
        if key in ecmultiply_memo:
            return ecmultiply_memo[key]
        else:
            result = ecmultiply(k // 2, mul2(P, p), p)
            ecmultiply_memo[key] = result
            return result
    else:
        return add(P, ecmultiply((k - 1) // 2, mul2(P, p), p))
def mulk(k, P=PG, p=modulo):
    # Plain (uncached) recursive double-and-add scalar multiplication k*P.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        return mulk(k // 2, mul2(P, p), p)
    else:
        return add(P, mulk((k - 1) // 2, mul2(P, p), p))
def search(Nt, Nw, puzzle, kangoo_power, starttime):
    # One worker's kangaroo walk.  T/t/dt (tame) and W/w/dw (wild) plus
    # DP_rarity/hop_modulo/W0/P are module-level names inherited via fork.
    # NOTE(review): forked workers inherit the parent's `random` state, so all
    # processes may generate identical starting scalars and identical walks
    # unless random.seed() is called per process -- confirm intended.
    global stop_event
    for k in range(Nt):
        t.append((3 << (puzzle - 2)) + random.randint(1, (2 ** (puzzle - 1))))
        T.append(mulk(t[k]))
        dt.append(0)

    for k in range(Nw):
        w.append(random.randint(1, (1 << (puzzle - 1))))
        W.append(add(W0, mulk(w[k])))
        dw.append(0)

    Hops, Hops_old = 0, 0

    oldtime = time.time()
    starttime = oldtime
    while True:
        # Tame herd: hop size is 2^(x mod hop_modulo).
        for k in range(Nt):
            Hops += 1
            pw = T[k].x % hop_modulo
            dt[k] = 1 << pw
            # NOTE(review): `solved` only fires if comparator() returns a
            # truthy value -- verify comparator's return path.
            solved = check(T[k], t[k], DP_rarity, T, t, W, w)
            if solved:
                stop_event.set()  # Set the stop event to signal all processes
                return "sol. time: %.2f sec" % (
                    time.time() - starttime
                )  # Return solution time
            sys.stdout.write("\033[01;33m")
            sys.stdout.flush()
            t[k] += dt[k]
            T[k] = add(P[pw], T[k])

        # Wild herd (offset by the target point W0).
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            dw[k] = 1 << pw
            solved = check(W[k], w[k], DP_rarity, W, w, T, t)
            if solved:
                stop_event.set()  # Set the stop event to signal all processes
                return "sol. time: %.2f sec" % (
                    time.time() - starttime
                )  # Return solution time
            sys.stdout.write("\033[01;33m")
            sys.stdout.flush()
            w[k] += dw[k]
            W[k] = add(P[pw], W[k])

        # Check the stop event and exit the loop if it's set
        if stop_event.is_set():
            sys.exit()  # Exit the script if a solution is found
# Main script
if __name__ == "__main__":
    # Start-up banner (clears the terminal as a side effect).
    os.system("clear")
    t = time.ctime()
    sys.stdout.write("\033[01;33m")
    sys.stdout.write("################################################" + "\n")
    sys.stdout.write("Pollard-kangaroo PrivKey Recovery Tool multicore" + "\n")
    sys.stdout.write("################################################" + "\n")
    sys.stdout.write(t + "\n")
    sys.stdout.flush()

    # Initialize constants and precompute table
    puzzle = 50
    kangoo_power = 10  # For Puzzle 50-56 use 9 to 11, for Puzzle 60-80 use 14 to 16 / 24 cores or above preferred
    Nt = Nw = 2**kangoo_power
    DP_rarity = 1 << ((puzzle - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((puzzle - 1) // 2) + kangoo_power
    T, t, dt = [], [], []
    W, w, dw = [], [], []

    compressed_public_key = "03f46f41027bbf44fafd6b059091b900dad41e6845b2241dc3254c7cdd3c5a16c6"  # Puzzle 50

    # Decompress the public key: prefix "02"/"03" selects the Y parity.
    if len(compressed_public_key) == 66:
        X = int(compressed_public_key[2:66], 16)
        Y = X2Y(X, gmpy2.mpz(compressed_public_key[:2]) - 2)
    else:
        # NOTE(review): execution continues after this error; X/Y stay undefined.
        print("[error] pubkey len(66/130) invalid!")

    print(f"[Puzzle]: {puzzle}")
    print("[Xcoordinate]: %064x" % X)
    print("[Ycoordinate]: %064x" % Y)

    W0 = Point(X, Y)
    starttime = oldtime = time.time()

    Hops = 0
    random.seed()

    hops_list = []

    process_count = cpu_count()  # Use all available CPU cores
    print(f"Using {process_count} CPU cores for parallel search")

    # Perform parallel search
    # NOTE(review): search() reads DP_rarity/hop_modulo/T/t/W0/... as module
    # globals; workers see these only via fork() inheritance, so this relies
    # on the "fork" start method (Linux).
    pool = Pool(process_count)
    results = pool.starmap(
        search, [(Nt, Nw, puzzle, kangoo_power, starttime)] * process_count
    )
    pool.close()
    pool.join()

    for result in results:
        print(result)
|
|
|
in this example (python kangaroo parallel) :
with 4 cpu after changing little the code I have 73 second on laptop with Intel i5 for 2**50
You have a "mishmash" in the code; you need to clean the code up and rewrite it.
I updated the code to read and write the "tame.txt" and "wild.txt" files faster with buffering, and added the X, Y coordinates to the startup output, etc. I still can't get a time under 100 seconds for 2**50. Can you show me where the "mishmash" in the code is? ![Grin](https://bitcointalk.org/Smileys/default/grin.gif) Thanks in advance! import time import os import sys import io import random import math import gmpy2 from gmpy2 import mpz from functools import lru_cache from multiprocessing import Pool, cpu_count
# secp256k1 domain parameters: field prime, group order, and generator (Gx, Gy).
modulo = gmpy2.mpz(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
order = gmpy2.mpz(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141)
Gx = gmpy2.mpz(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798)
Gy = gmpy2.mpz(0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)


class Point:
    # Simple mutable affine point; Point(0, 0) stands in for the point at infinity.
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y


PG = Point(Gx, Gy)
Z = Point(0, 0)  # zero-point, infinite in real x,y-plane


def mul2(P, p=modulo):
    # Affine point doubling: slope c = 3*x^2 / (2*y) mod p; undefined for P.y == 0.
    c = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    R = Point()
    R.x = (c * c - 2 * P.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


def add(P, Q, p=modulo):
    # Affine addition of points with distinct x coordinates; gmpy2.invert
    # raises if dx is not invertible (P == Q or P == -Q).
    dx = Q.x - P.x
    dy = Q.y - P.y
    c = dy * gmpy2.invert(dx, p) % p
    R = Point()
    R.x = (c * c - P.x - Q.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


@lru_cache(maxsize=None)
def X2Y(X, y_parity, p=modulo):
    # Recover Y with the requested parity for X on y^2 = x^3 + 7.
    # First loop: square-and-multiply for X^3 mod p (exponent kept in Y).
    Y = 3
    tmp = 1
    while Y:
        if Y & 1:
            tmp = tmp * X % p
        Y >>= 1
        X = X * X % p
    # X reused to hold the right-hand side x^3 + 7.
    X = (tmp + 7) % p
    # Second loop: modular square root via exponent (p + 1) // 4 (p % 4 == 3).
    Y = (p + 1) // 4
    tmp = 1
    while Y:
        if Y & 1:
            tmp = tmp * X % p
        Y >>= 1
        X = X * X % p
    Y = tmp
    # Choose the root whose parity matches the compressed-key prefix.
    if Y % 2 != y_parity:
        Y = -Y % p
    return Y


def compute_P_table():
    # Precompute the kangaroo jump table P[k] = 2^k * G for k = 0..255.
    P = [PG]
    for k in range(255):
        P.append(mul2(P[k]))
    return P


P = compute_P_table()
# Start-up banner (clears the terminal as a side effect).
os.system("clear")
t = time.ctime()
sys.stdout.write("\033[01;33m")
sys.stdout.write("################################################" + "\n")
sys.stdout.write("Pollard-kangaroo PrivKey Recovery Tool multicore" + "\n")
sys.stdout.write("################################################" + "\n")
sys.stdout.write(t + "\n")
sys.stdout.write("P-table prepared" + "\n")
sys.stdout.write("tame and wild herds kangaroos is being prepared" + "\n")
sys.stdout.flush()


def comparator(A, Ak, B, Bk):
    # Tame/wild collision test; on success prints and logs the key difference.
    result = set(A).intersection(set(B))
    if result:
        sol_kt = A.index(next(iter(result)))
        sol_kw = B.index(next(iter(result)))
        difference = Ak[sol_kt] - Bk[sol_kw]
        HEX = "%064x" % difference  # Convert to a hexadecimal string
        t = time.ctime()
        total_time = time.time() - starttime  # NOTE(review): module-level global
        print("\033[01;33mSOLVED:", t, f"total time: {total_time:.2f} sec", HEX, "\n")
        with open("KEYFOUNDKEYFOUND.txt", "a") as file:
            file.write("\n\nSOLVED " + t)
            file.write(f"\nTotal Time: {total_time:.2f} sec")
            file.write("\nPrivate Key (decimal): " + str(difference))
            file.write("\nPrivate Key (hex): " + HEX)
            file.write(
                "\n-------------------------------------------------------------------------------------------------------------------------------------\n"
            )
        return True
    else:
        return False


# Batch writing function
def batch_write_data_to_file(data, file_path, batch_size=5000):
    # NOTE(review): buffering=1 GiB allocates a huge write buffer on every
    # open -- this is likely part of the slowness being discussed in-thread.
    with open(file_path, "a", buffering=1024 * 1024 * 1024) as fp:
        for i in range(0, len(data), batch_size):
            batch = data[i : i + batch_size]
            fp.writelines(batch)


# Function to check and write data to file
def check(
    P, Pindex, DP_rarity, file2save, A, Ak, B, Bk, buffer_size=1024 * 1024 * 1024
):
    # Distinguished-point test: record the point, persist it, print progress,
    # then look for a collision between the two herds.
    if P.x % DP_rarity == 0:
        A.append(P.x)
        Ak.append(Pindex)
        data_to_write = ["%064x %d\n" % (P.x, Pindex)]
        batch_write_data_to_file(data_to_write, file2save)  # Batch write data to file
        # Print the public key
        message = "\rPublic key: {:064x}".format(P.x)
        sys.stdout.write("\033[01;33m")
        sys.stdout.write(message + "\r")
        sys.stdout.flush()
        return comparator(A, Ak, B, Bk)
    else:
        return False


def save2file(path, mode, data, buffer_size=1024 * 1024 * 1024):
    # Write str/int data (scalar or container) to path; buffer_size is unused.
    with open(path, mode, encoding="utf-8") as fp:
        if isinstance(data, (list, tuple, dict, set)):
            for line in data:
                if isinstance(line, str):
                    fp.write(line)
                elif isinstance(line, int):
                    fp.write(str(line))
        elif isinstance(data, (str, int)):
            fp.write(str(data))


# Memoization for ecmultiply
ecmultiply_memo = {}


def ecmultiply(k, P=PG, p=modulo):
    # Recursive double-and-add.
    # NOTE(review): BUG -- the memo key ignores the base point P although the
    # recursion changes P (mul2(P)); e.g. after ecmultiply(4) caches memo[2],
    # ecmultiply(2) would return 4*G instead of 2*G.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        if k in ecmultiply_memo:
            return ecmultiply_memo[k]
        else:
            result = ecmultiply(k // 2, mul2(P, p), p)
            ecmultiply_memo[k] = result
            return result
    else:
        return add(P, ecmultiply((k - 1) // 2, mul2(P, p), p))


def mulk(k, P=PG, p=modulo):
    # Plain (uncached) recursive double-and-add scalar multiplication k*P.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        return mulk(k // 2, mul2(P, p), p)
    else:
        return add(P, mulk((k - 1) // 2, mul2(P, p), p))


def search(Nt, Nw, puzzle, kangoo_power, starttime):
    # One worker's kangaroo walk: tame herd then wild herd, repeated until a
    # distinguished-point collision is reported by check().
    DP_rarity = 1 << ((puzzle - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((puzzle - 1) // 2) + kangoo_power
    T, t, dt = [], [], []
    W, w, dw = [], [], []
    for k in range(Nt):
        t.append((3 << (puzzle - 2)) + random.randint(1, (2 ** (puzzle - 1))))
        T.append(mulk(t[k]))
        dt.append(0)
    for k in range(Nw):
        w.append(random.randint(1, (1 << (puzzle - 1))))
        W.append(add(W0, mulk(w[k])))
        dw.append(0)
    oldtime = time.time()
    Hops, Hops_old = 0, 0
    t0 = time.time()
    oldtime = time.time()
    starttime = oldtime
    while True:
        for k in range(Nt):
            Hops += 1
            pw = T[k].x % hop_modulo
            dt[k] = 1 << pw
            solved = check(T[k], t[k], DP_rarity, "tame.txt", T, t, W, w)
            if solved:
                return "sol. time: %.2f sec" % (time.time() - starttime)
            t[k] += dt[k]
            T[k] = add(P[pw], T[k])
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            dw[k] = 1 << pw
            solved = check(W[k], w[k], DP_rarity, "wild.txt", W, w, T, t)
            if solved:
                return "sol. time: %.2f sec" % (time.time() - starttime)
            w[k] += dw[k]
            W[k] = add(P[pw], W[k])
puzzle = 50
compressed_public_key = (
    "03f46f41027bbf44fafd6b059091b900dad41e6845b2241dc3254c7cdd3c5a16c6"  # Puzzle 50
)
kangoo_power = 11  # For Puzzle 50-56 use 9 to 11, for Puzzle 60-80 use 14 to 16 / 24 cores or above preferred
Nt = Nw = 2**kangoo_power
# check format pubkey
if len(compressed_public_key) == 66:
    X = int(compressed_public_key[2:66], 16)
    # calculation Y from X if pubkey is compressed (prefix "02"/"03" -> parity 0/1)
    Y = X2Y(X, gmpy2.mpz(compressed_public_key[:2]) - 2)
else:
    # NOTE(review): execution continues after this error; X/Y stay undefined.
    print("[error] pubkey len(66/130) invalid!")

print(f"[Puzzle]: {puzzle}")
print("[Xcoordinate]: %064x" % X)
print("[Ycoordinate]: %064x" % Y)

W0 = Point(X, Y)
starttime = oldtime = time.time()

Hops = 0
random.seed()

hops_list = []
N_tests = kangoo_power

for k in range(N_tests):
    # Create empty 'tame.txt' and 'wild.txt' files for each iteration
    save2file("tame.txt", "w", "")
    save2file("wild.txt", "w", "")


def parallel_search(process_count, Nt, Nw, puzzle, kangoo_power, starttime):
    # Fan search() out over process_count worker processes.
    pool = Pool(process_count)
    results = pool.starmap(
        search, [(Nt, Nw, puzzle, kangoo_power, starttime)] * process_count
    )
    pool.close()
    pool.join()
    return results


if __name__ == "__main__":
    process_count = cpu_count()  # Use all available CPU cores
    print(f"Using {process_count} CPU cores for parallel search.")
    results = parallel_search(process_count, Nt, Nw, puzzle, kangoo_power, starttime)
    for result in results:
        print(result)
|
|
|
Ps, I am really uneducated totally, I just share my experience, please nobody take my posts as if I am arrogant, I am 0.00000001 while everyone here is beyond 100.🙂
Problem is people are so consumed by media these days that their dopamine receptors are completely fried.. therefore have no patience or joy. They need to go outside and touch some grass. Sleep on it. And then come back down to reality. ![Wink](https://bitcointalk.org/Smileys/default/wink.gif)
|
|
|
Looks like those who are searching for #66 are in the 354d range; I am also searching in the same range. If my research is correct, the #66 range should be between 354df and 358ae. Even this range will take ages to scan. LoL.
How did you come to this conclusion? By breaking down bits and some bits combination. Maybe just some wild guessing and not sure if I am correct because I spots seems like some obvious pattern. I hope someone can find it on this range thought even if I don't get it. If the results were in this range, then I can use the same methods to proceed to 67. Unfortunately, the most lowest I can go is only on current range. I guess it boost some morale maybe I am right when I saw zahid8888 post PK start at 354d and have similarity on #66 Hash160. Anybody here familiar with Kangaroo? I have 1 stupid question. If let's say I put 1 million public address to search for private key and there's only 1 valid public address that fit the range. Will it take much more longer time to find the valid public key to get the private key? I tried just now with 100,000 public address, but the average time to solve shown unchanged.
I tried to put 1001 key with 1000 false public key and 1 puzzle 35 key. Why kangaroo can't solve it? Does that mean we can only put 1 public key at a time?
You can't find low range keys with kangaroo, 35 bit total range is less than 35 billion keys, I have tried with low ranges, my kangaroos start dying very fast, I can't even say goodbye. 🤣 I think more public keys you place in target file more you lose speed, but the speed reduction is insignificant even with few thousands less or more keys. Exactly. It dead kangaroo almost immediately when I start. I am just trying to figure out if Kangaroo able to search lots of fake key with 1 valid key at once because I have some idea to lower #130 bits down but need to do a lot of manual works. Anybody here familiar with Kangaroo? I have 1 stupid question. If let's say I put 1 million public address to search for private key and there's only 1 valid public address that fit the range. Will it take much more longer time to find the valid public key to get the private key? I tried just now with 100,000 public address, but the average time to solve shown unchanged.
I tried to put 1001 key with 1000 false public key and 1 puzzle 35 key. Why kangaroo can't solve it? Does that mean we can only put 1 public key at a time?
You can't find low range keys with kangaroo, 35 bit total range is less than 35 billion keys, I have tried with low ranges, my kangaroos start dying very fast, I can't even say goodbye. 🤣 I think more public keys you place in target file more you lose speed, but the speed reduction is insignificant even with few thousands less or more keys. I think it's quite significant but I haven't try till it solve. Will try later. For example I try with on #65 keys, it solve in less than 3 minutes. But when I put it with 100 fake keys and 1 real key, I run for 20 mins just now and it still didn't solve. I will try later to see how long it takes with 100 and 1000 keys with only 1 real key. Furthermore, I try with #64 keys while the range I set it on #65, it seems like kangaroo unable to solve it. Update on 64. When I try to solve 64 Public Key but range setting at 65, it spends almost 5 times more with a correct range provided to search. Update: I don't think Kangaroo able to solve multiple address. Now I am trying to merge save file and see if it able to resolve. He is joking. ![Grin](https://bitcointalk.org/Smileys/default/grin.gif) Sat Sep 9 11:51:57 2023 P-table prepared tame and wild herds is being prepared Using 12 CPU cores for parallel search. Public key: 02f6a8148a62320e149cb15c544fe8a25ab483a0095d2280d03b8a00a7feada13d time: 2.66 sec For 2 seconds. ![Cool](https://bitcointalk.org/Smileys/default/cool.gif) import time import os import sys import random import gmpy2 from gmpy2 import mpz from functools import lru_cache from multiprocessing import Pool, cpu_count
# secp256k1 domain parameters (decimal form): field prime, group order, generator.
modulo = gmpy2.mpz(115792089237316195423570985008687907853269984665640564039457584007908834671663)
order = gmpy2.mpz(115792089237316195423570985008687907852837564279074904382605163141518161494337)
Gx = gmpy2.mpz(55066263022277343669578718895168534326250603453777594175500187360389116729240)
Gy = gmpy2.mpz(32670510020758816978083085130507043184471273380659243275938904335757337482424)


class Point:
    # Simple mutable affine point; Point(0, 0) stands in for the point at infinity.
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y


PG = Point(Gx, Gy)
Z = Point(0, 0)  # zero-point, infinite in real x,y-plane


def mul2(P, p=modulo):
    # Affine point doubling: slope c = 3*x^2 / (2*y) mod p; undefined for P.y == 0.
    c = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    R = Point()
    R.x = (c * c - 2 * P.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


def add(P, Q, p=modulo):
    # Affine addition of points with distinct x coordinates; gmpy2.invert
    # raises if dx is not invertible (P == Q or P == -Q).
    dx = Q.x - P.x
    dy = Q.y - P.y
    c = dy * gmpy2.invert(dx, p) % p
    R = Point()
    R.x = (c * c - P.x - Q.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


@lru_cache(maxsize=None)
def X2Y(X, y_parity, p=modulo):
    # Recover Y with the requested parity for X on y^2 = x^3 + 7.
    # First loop: square-and-multiply for X^3 mod p (exponent kept in Y).
    Y = 3
    tmp = 1
    while Y:
        if Y & 1:
            tmp = tmp * X % p
        Y >>= 1
        X = X * X % p
    # X reused to hold the right-hand side x^3 + 7.
    X = (tmp + 7) % p
    # Second loop: modular square root via exponent (p + 1) // 4 (p % 4 == 3).
    Y = (p + 1) // 4
    tmp = 1
    while Y:
        if Y & 1:
            tmp = tmp * X % p
        Y >>= 1
        X = X * X % p
    Y = tmp
    # Choose the root whose parity matches y_parity.
    if Y % 2 != y_parity:
        Y = -Y % p
    return Y


def compute_P_table():
    # Precompute the kangaroo jump table P[k] = 2^k * G for k = 0..255.
    P = [PG]
    for k in range(255):
        P.append(mul2(P[k]))
    return P


P = compute_P_table()
# Start-up banner (clears the terminal as a side effect).
os.system('clear')
t = time.ctime()
sys.stdout.write("\033[01;33m")
sys.stdout.write(t + "\n")
sys.stdout.write("P-table prepared" + "\n")
sys.stdout.write("tame and wild herds is being prepared" + "\n")
sys.stdout.flush()


def comparator(A, Ak, B, Bk):
    # Tame/wild collision test; on success prints and logs the key difference.
    result = set(A).intersection(set(B))
    if result:
        sol_kt = A.index(next(iter(result)))
        sol_kw = B.index(next(iter(result)))
        print('total time: %.2f sec' % (time.time() - starttime))  # NOTE(review): module-level starttime
        difference = Ak[sol_kt] - Bk[sol_kw]
        HEX = "%064x" % difference  # Convert to a hexadecimal string
        t = time.ctime()
        print('SOLVED:', t, difference)
        with open("KEYFOUNDKEYFOUND.txt", 'a') as file:
            file.write('\n\nSOLVED ' + t)
            file.write('\nPrivate Key (decimal): ' + str(difference))
            file.write('\nPrivate Key (hex): ' + HEX)
            file.write('\n-------------------------------------------------------------------------------------------------------------------------------------\n')
        return True
    else:
        return False


def check(P, Pindex, DP_rarity, file2save, A, Ak, B, Bk):
    # Distinguished-point test: record, persist to file2save, print progress,
    # then look for a collision between the herds.
    if P.x % DP_rarity == 0:
        A.append(P.x)
        Ak.append(Pindex)
        with open(file2save, 'a') as file:
            file.write(('%064x %d' % (P.x, Pindex)) + "\n")
        # Print the public key
        message = "\rPublic key: {:064x}".format(P.x)
        sys.stdout.write("\033[01;33m")
        sys.stdout.write(message)
        sys.stdout.flush()
        return comparator(A, Ak, B, Bk)
    else:
        return False


# Memoization for ecmultiply
ecmultiply_memo = {}


def ecmultiply(k, P=PG, p=modulo):
    # Recursive double-and-add.
    # NOTE(review): BUG -- the memo key ignores the base point P although the
    # recursion changes P via mul2(P); cached values are wrong across bases.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        if k in ecmultiply_memo:
            return ecmultiply_memo[k]
        else:
            result = ecmultiply(k // 2, mul2(P, p), p)
            ecmultiply_memo[k] = result
            return result
    else:
        return add(P, ecmultiply((k - 1) // 2, mul2(P, p), p))


def mulk(k, P=PG, p=modulo):
    # Plain (uncached) recursive double-and-add scalar multiplication k*P.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        return mulk(k // 2, mul2(P, p), p)
    else:
        return add(P, mulk((k - 1) // 2, mul2(P, p), p))


def search(Nt, Nw, puzzle, kangoo_power, starttime):
    # One worker's kangaroo walk: tame herd then wild herd, until check()
    # reports a distinguished-point collision.
    DP_rarity = 1 << ((puzzle - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((puzzle - 1) // 2) + kangoo_power
    T, t, dt = [], [], []
    W, w, dw = [], [], []
    for k in range(Nt):
        t.append((3 << (puzzle - 2)) + random.randint(1, (1 << (puzzle - 1))))
        T.append(mulk(t[k]))
        dt.append(0)
    for k in range(Nw):
        w.append(random.randint(1, (1 << (puzzle - 1))))
        W.append(add(W0, mulk(w[k])))
        dw.append(0)
    oldtime = time.time()
    Hops, Hops_old = 0, 0
    t0 = time.time()
    oldtime = time.time()
    starttime = oldtime
    while True:
        for k in range(Nt):
            Hops += 1
            pw = T[k].x % hop_modulo
            dt[k] = 1 << pw
            solved = check(T[k], t[k], DP_rarity, "tame.txt", T, t, W, w)
            if solved:
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            t[k] += dt[k]
            T[k] = add(P[pw], T[k])
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            dw[k] = 1 << pw
            solved = check(W[k], w[k], DP_rarity, "wild.txt", W, w, T, t)
            if solved:
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            w[k] += dw[k]
            W[k] = add(P[pw], W[k])
puzzle = 35
compressed_public_key = "02f6a8148a62320e149cb15c544fe8a25ab483a0095d2280d03b8a00a7feada13d"  # Puzzle 35
kangoo_power = 9  #For Puzzle 50-56 use 9 to 11, for Puzzle 60-80 use 14 to 16 / 24 cores or above preferred
Nt = Nw = 2 ** kangoo_power
# NOTE(review): unlike the other variants, X here is parsed WITH the prefix
# byte; X >> 256 extracts that prefix (2 or 3) and is used as the parity
# argument plus a second parity correction below -- confirm this round-trips
# for "02"-prefixed keys as well.
X = int(compressed_public_key, 16)
Y = X2Y(X % (2 ** 256), X >> 256)
if Y % 2 != (X >> 256) % 2:
    Y = modulo - Y
X = X % (2 ** 256)
W0 = Point(X, Y)
starttime = oldtime = time.time()

Hops = 0
random.seed()

hops_list = []
N_tests = kangoo_power

for k in range(N_tests):
    # Truncate tame.txt/wild.txt before each run.
    buffer_size = 1024 * 1024 * 1024  # 1024 MB in bytes
    with open("tame.txt", 'w', buffering=buffer_size) as tame_file, open("wild.txt", 'w', buffering=buffer_size) as wild_file:
        tame_file.write('')
        wild_file.write('')


def parallel_search(process_count, Nt, Nw, puzzle, kangoo_power, starttime):
    # Fan search() out over process_count worker processes.
    pool = Pool(process_count)
    results = pool.starmap(search, [(Nt, Nw, puzzle, kangoo_power, starttime)] * process_count)
    pool.close()
    pool.join()
    return results


if __name__ == '__main__':
    process_count = cpu_count()  # Use all available CPU cores
    print(f"Using {process_count} CPU cores for parallel search.")
    results = parallel_search(process_count, Nt, Nw, puzzle, kangoo_power, starttime)
    for result in results:
        print(result)
|
|
|
import time import os import sys import random import gmpy2 from gmpy2 import mpz from functools import lru_cache from multiprocessing import Pool, cpu_count
modulo = gmpy2.mpz(115792089237316195423570985008687907853269984665640564039457584007908834671663) order = gmpy2.mpz(115792089237316195423570985008687907852837564279074904382605163141518161494337) Gx = gmpy2.mpz(55066263022277343669578718895168534326250603453777594175500187360389116729240) Gy = gmpy2.mpz(32670510020758816978083085130507043184471273380659243275938904335757337482424)
class Point: def __init__(self, x=0, y=0): self.x = x self.y = y
PG = Point(Gx, Gy) Z = Point(0, 0) # zero-point, infinite in real x,y-plane
def mul2(P, p=modulo):
    """Return 2*P (point doubling) over GF(p)."""
    # Tangent slope: lambda = 3*x^2 / (2*y) mod p (curve has a = 0).
    lam = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    doubled = Point()
    doubled.x = (lam * lam - 2 * P.x) % p
    doubled.y = (lam * (P.x - doubled.x) - P.y) % p
    return doubled
def add(P, Q, p=modulo):
    """Return P + Q over GF(p).

    Generalized to handle the doubling case: the original divided by
    Q.x - P.x, so calling it with P == Q raised ZeroDivisionError inside
    gmpy2.invert.  Distinct-x behavior is unchanged.
    """
    if P.x == Q.x and P.y == Q.y:
        # P + P: tangent slope 3*x^2 / (2*y), as in mul2.
        c = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    else:
        # Chord slope (Q.y - P.y) / (Q.x - P.x).
        c = (Q.y - P.y) * gmpy2.invert(Q.x - P.x, p) % p
    R = Point()
    R.x = (c * c - P.x - Q.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R
@lru_cache(maxsize=None)
def X2Y(X, y_parity, p=None):
    """Recover y for x-coordinate X on y^2 = x^3 + 7 over GF(p).

    y_parity selects which of the two square roots to return (0 even, 1 odd).
    Works because p % 4 == 3, so a square root of v is pow(v, (p+1)//4, p).

    The original open-coded two square-and-multiply loops; the builtin
    three-argument pow() performs the same modular exponentiation in C.
    The default modulus is resolved at call time (p=None) so the function
    is also usable/testable with an explicit prime; callers using the old
    two-argument form get identical results.
    """
    if p is None:
        p = modulo
    rhs = (pow(X, 3, p) + 7) % p      # x^3 + 7 mod p
    Y = pow(rhs, (p + 1) // 4, p)     # square root, valid since p ≡ 3 (mod 4)
    if Y % 2 != y_parity:
        Y = -Y % p                    # switch to the other root to match parity
    return Y
def compute_P_table():
    """Precompute [G, 2G, 4G, ..., 2^255 * G] for O(1) hop-point lookup."""
    table = [PG]
    while len(table) < 256:
        table.append(mul2(table[-1]))
    return table

P = compute_P_table()
# Console banner (ANSI bright yellow); 'clear' assumes a POSIX terminal.
os.system('clear')
t = time.ctime()
sys.stdout.write("\033[01;33m")
sys.stdout.write(t + "\n")
sys.stdout.write("P-table prepared" + "\n")
sys.stdout.write("tame and wild herds is being prepared" + "\n")
sys.stdout.flush()
def comparator(A, Ak, B, Bk):
    """Look for a tame/wild x-coordinate collision and report the solved key.

    A/Ak are one herd's distinguished x coordinates and scalar distances,
    B/Bk the other herd's (check() calls this with either order).  Uses the
    module-global `starttime` for the elapsed-time report.  Returns True
    when a collision was found and recorded, else False.
    """
    result = set(A).intersection(B)
    if not result:
        return False
    hit = next(iter(result))
    sol_kt = A.index(hit)
    sol_kw = B.index(hit)
    print('total time: %.2f sec' % (time.time() - starttime))
    # At a collision tame_dist = key + wild_dist, so key = |tame - wild|.
    # The original wrote the raw signed difference, producing a negative
    # "private key" and a bogus hex string whenever the wild herd was the
    # caller (see the solved-run log: "SOLVED: ... -611140496167764").
    difference = abs(Ak[sol_kt] - Bk[sol_kw])
    HEX = "%064x" % difference  # Convert to a hexadecimal string
    t = time.ctime()
    print('SOLVED:', t, difference)
    with open("KEYFOUNDKEYFOUND.txt", 'a') as file:
        file.write('\n\nSOLVED ' + t)
        file.write('\nPrivate Key (decimal): ' + str(difference))
        file.write('\nPrivate Key (hex): ' + HEX)
        file.write('\n-------------------------------------------------------------------------------------------------------------------------------------\n')
    return True
def check(P, Pindex, DP_rarity, file2save, A, Ak, B, Bk):
    """Record P if it is a distinguished point, then test for a collision.

    Only points whose x coordinate is divisible by DP_rarity are stored
    (in memory and appended to file2save); every other point is just a hop.
    Returns comparator()'s result (True on a solved key), else False.
    """
    if P.x % DP_rarity == 0:
        A.append(P.x)
        Ak.append(Pindex)
        with open(file2save, 'a') as file:
            file.write(('%064x %d' % (P.x, Pindex)) + "\n")
        # Print the public key
        message = "\rPublic key: {:064x}".format(P.x)
        sys.stdout.write("\033[01;33m")
        sys.stdout.write(message)
        sys.stdout.flush()
        return comparator(A, Ak, B, Bk)
    else:
        return False
# Memoization for ecmultiply.  Keyed on (k, base point): the recursion below
# calls itself with a *different* base (mul2(P)) at every level, so the
# original cache, keyed on k alone, returned points computed for whatever
# base happened to be seen first — wrong results on any reuse.
ecmultiply_memo = {}

def ecmultiply(k, P=PG, p=modulo):
    """Return k*P by recursive double-and-add, memoizing the even steps."""
    if k == 0:
        return Z
    if k == 1:
        return P
    if k % 2 == 0:
        key = (k, P.x, P.y)
        if key not in ecmultiply_memo:
            ecmultiply_memo[key] = ecmultiply(k // 2, mul2(P, p), p)
        return ecmultiply_memo[key]
    return add(P, ecmultiply((k - 1) // 2, mul2(P, p), p))
def mulk(k, P=PG, p=modulo):
    """Return k*P by iterative double-and-add (LSB first)."""
    acc = None   # running sum; None until the first set bit contributes
    base = P
    while k:
        if k & 1:
            acc = base if acc is None else add(acc, base)
        k >>= 1
        if k:
            base = mul2(base, p)
    # k == 0 on entry yields the zero point, matching the recursive version.
    return acc if acc is not None else Z
def search(Nt, Nw, puzzle, kangoo_power, starttime):
    """Run the Pollard-kangaroo search with Nt tame and Nw wild kangaroos.

    puzzle       -- bit size of the private-key interval being searched
    kangoo_power -- log2 of the herd size (Nt == Nw == 2**kangoo_power)
    starttime    -- wall-clock start; re-based below so the reported time
                    covers only the hop loop, not herd preparation

    Returns a human-readable solve-time string once check()/comparator()
    detects a tame/wild distinguished-point collision.

    Cleanup vs. the original: removed the dead `Hops_old`, `t0` and the
    duplicated `oldtime` assignments, and the `dt`/`dw` lists that only ever
    held the value used immediately afterwards.
    """
    # Standard kangaroo tuning: distinguished-point rarity and hop-table size.
    DP_rarity = 1 << ((puzzle - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((puzzle - 1) // 2) + kangoo_power
    T, t = [], []  # tame points and their scalar distances
    W, w = [], []  # wild points and their scalar distances
    for k in range(Nt):
        # Tame kangaroos start near the top of the interval.
        t.append((3 << (puzzle - 2)) + random.randint(1, 1 << (puzzle - 1)))
        T.append(mulk(t[k]))
    for k in range(Nw):
        # Wild kangaroos start at the target point W0 plus a random offset.
        w.append(random.randint(1, 1 << (puzzle - 1)))
        W.append(add(W0, mulk(w[k])))
    Hops = 0
    starttime = time.time()  # measure the hop loop only
    while True:
        for k in range(Nt):
            Hops += 1
            # Pseudo-random hop index derived from the x coordinate.
            pw = T[k].x % hop_modulo
            if check(T[k], t[k], DP_rarity, "tame.txt", T, t, W, w):
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            t[k] += 1 << pw
            T[k] = add(P[pw], T[k])
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            if check(W[k], w[k], DP_rarity, "wild.txt", W, w, T, t):
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            w[k] += 1 << pw
            W[k] = add(P[pw], W[k])
# --- Target configuration (Puzzle 50) ---
puzzle = 50  # bit size of the key interval
compressed_public_key = "03f46f41027bbf44fafd6b059091b900dad41e6845b2241dc3254c7cdd3c5a16c6"  # Puzzle 50
kangoo_power = 10  # For Puzzle 50-56 use 9 to 11, for Puzzle 60-80 use 14 to 16 / 24 cores or above preferred
Nt = Nw = 2 ** kangoo_power  # herd sizes (tame == wild)
X = int(compressed_public_key, 16)
# The prefix byte of the compressed key (X >> 256, 0x02/0x03) encodes y's parity.
Y = X2Y(X % (2 ** 256), X >> 256)
if Y % 2 != (X >> 256) % 2:
    Y = modulo - Y  # pick the square root whose parity matches the prefix
X = X % (2 ** 256)  # drop the prefix byte, keep only the x coordinate
W0 = Point(X, Y)  # the target public-key point
starttime = oldtime = time.time()
Hops = 0
random.seed()  # seed from OS entropy

hops_list = []
N_tests = kangoo_power

# Start every run with empty distinguished-point logs.  The original
# re-truncated the same two files N_tests times inside a loop, each time
# allocating a 1 GiB write buffer just to write an empty string; opening
# each file once in 'w' mode has the identical effect (both end up empty).
open("tame.txt", 'w').close()
open("wild.txt", 'w').close()
def parallel_search(process_count, Nt, Nw, puzzle, kangoo_power, starttime):
    """Distribute `search` across `process_count` worker processes."""
    shared_args = (Nt, Nw, puzzle, kangoo_power, starttime)
    workers = Pool(process_count)
    collected = workers.starmap(search, [shared_args] * process_count)
    workers.close()
    workers.join()
    return collected
if __name__ == '__main__': process_count = cpu_count() # Use all available CPU cores print(f"Using {process_count} CPU cores for parallel search.") results = parallel_search(process_count, Nt, Nw, puzzle, kangoo_power, starttime) for result in results: print(result)
Fri Sep 8 21:52:03 2023 P-table prepared tame and wild herds is being prepared Using 4 CPU cores for parallel search. Public key: 03f46f41027bbf44fafd6b059091b900dad41e6845b2241dc3254c7cdd3c5a16c6 total time: 246.94 sec SOLVED: Fri Sep 8 21:56:10 2023 -611140496167764 Puzzle 50 solved for 246 seconds.... ![Grin](https://bitcointalk.org/Smileys/default/grin.gif) SOLVED Fri Sep 8 21:56:10 2023 Private Key (decimal): -611140496167764 Private Key (hex): -0000000000000000000000000000000000000000000000000022bd43c2e9354 ---------------------------------------------------------------------------------------------------------------
|
|
|
Here's a fix I wrote months ago — and submitted a pull request for — that allows you to run iceland's library from any folder. Thanks for the update. ![Wink](https://bitcointalk.org/Smileys/default/wink.gif)
|
|
|
Nothing going on around these woods.
We play with numbers with at least 20 decimal places here. If you could make one Giga (10^9) guesses per second, it would take: 10^20 / 10^9 = 10^11 seconds. This is equivalent to roughly 3,200 years. So, even with an incredibly fast computer making a billion guesses per second, it would take thousands of years to guess a 20-decimal-digit number. We'd better start practicing crystal ball gazing to guess what the correct range of Puzzle 66 is ![Grin](https://bitcointalk.org/Smileys/default/grin.gif)
|
|
|
1 - Convert the private key from hex to bytes 00000000000000000000000000000000000000000000000354d62e5f7a0d2eb2 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03T\xd6._z\r.\xb2'
2 - Create a signing key from the private key bytes using the SECP256k1 elliptic curve <ecdsa.keys.SigningKey object at 0x000002D447E14400>
3 - Get the corresponding public key 02b21a6b1590b145841a0dabbe71ea01e29ed60f0e468cff36445a9c92eb3a6375 VerifyingKey.from_string(b'\x02\xb2\x1ak\x15\x90\xb1E\x84\x1a\r\xab\xbeq\xea\x01\xe2\x9e\xd6\x0f\x0eF\x8c\xff6DZ\x9c\x92\xeb:cu', SECP256k1, sha1)
4 - Serialize the public key in compressed format (33 bytes) b'\x02\xb2\x1ak\x15\x90\xb1E\x84\x1a\r\xab\xbeq\xea\x01\xe2\x9e\xd6\x0f\x0eF\x8c\xff6DZ\x9c\x92\xeb:cu' 02b21a6b1590b145841a0dabbe71ea01e29ed60f0e468cff36445a9c92eb3a6375
5 - Calculate the SHA-256 hash of the public key b'\t\xb4\x87?D\'I\xef>\x86\xc7\x1d\x92\x86\xb1"\xa9\xdd\xf9v%\xa0\x03X\x88\xfb\x96%F\x0e\'\x16' 09b4873f442749ef3e86c71d9286b122a9ddf97625a0035888fb9625460e2716
6 - Calculate the RIPEMD-160 hash of the SHA-256 hash <ripemd160 HASH object @ 0x000002D4477BF690> b' \xd4Zjv%3BR\xc81\x8a\x87\xed053\xc1\xc7\xbb' 20d45a6a7625334252c8318a87ed303533c1c7bb
7 - Add the version byte (0x00 for mainnet) to the RIPEMD-160 hash b'\x00'
8 - Extended RIPEMD-160 Hash b'\x00 \xd4Zjv%3BR\xc81\x8a\x87\xed053\xc1\xc7\xbb' 0020d45a6a7625334252c8318a87ed303533c1c7bb
9 - Calculate the double SHA-256 checksum b'\x01\x02l\xf90\xf6N\x8f\xeb\xca\xc8\xc2\x15\xd9Q\xb8i))\xb0\xce:\xb1\xba\x9e\xa4\xa1\x07_\x05\xe2\xa2' b'\x01\x02l\xf9'
10 - Checksum: 01026cf9
11 - Append the checksum to the extended RIPEMD-160 hash b'\x00 \xd4Zjv%3BR\xc81\x8a\x87\xed053\xc1\xc7\xbb\x01\x02l\xf9' 0020d45a6a7625334252c8318a87ed303533c1c7bb01026cf9
12 - Address (with checksum) 0020d45a6a7625334252c8318a87ed303533c1c7bb01026cf9
13 - Convert the bytes to a base58-encoded Bitcoin address 13zb1hQbWVnN3ag9GNS2vCraT8PQJDjVdr
Could you provide an alternative, more straightforward method instead of this, if one is available?
Translated into Python3 : import hashlib import ecdsa import base58
# Private key in hexadecimal format private_key_hex = "00000000000000000000000000000000000000000000000354d62e5f7a0d2eb2"
# Convert private key from hex to bytes private_key_bytes = bytes.fromhex(private_key_hex)
# Create a signing key from the private key bytes using SECP256k1 curve signing_key = ecdsa.SigningKey.from_string(private_key_bytes, curve=ecdsa.SECP256k1)
# Get the corresponding public key in compressed format compressed_public_key = signing_key.get_verifying_key().to_string("compressed")
# Calculate the SHA-256 hash of the compressed public key sha256_hash = hashlib.sha256(compressed_public_key).digest()
# Calculate the RIPEMD-160 hash of the SHA-256 hash ripemd160_hash = hashlib.new('ripemd160', sha256_hash).digest()
# Add the version byte (0x00 for mainnet) to the RIPEMD-160 hash extended_ripe160_hash = b'\x00' + ripemd160_hash
# Calculate the double SHA-256 checksum checksum = hashlib.sha256(hashlib.sha256(extended_ripe160_hash).digest()).digest()[:4]
# Append the checksum to the extended RIPEMD-160 hash address_bytes = extended_ripe160_hash + checksum
# Convert the bytes to a base58-encoded Bitcoin address bitcoin_address = base58.b58encode(address_bytes).decode()
print("Bitcoin Address:", bitcoin_address)
The process of deriving a Bitcoin address from a private key involves several cryptographic operations and encoding steps, so there isn't a significantly shorter method to achieve this without skipping any essential steps. While it may be possible to write more concise code or create a custom function to encapsulate the process, the core steps themselves are fundamental to Bitcoin address generation. So there is no way to speed it up further through the code. Unfortunately. ![Grin](https://bitcointalk.org/Smileys/default/grin.gif) p.s. You can use import secp256k1 as ice https://github.com/iceland2k14/secp256k1 (you must have this library in the same folder where the command is executed for it to work). This is a custom function which encapsulates the process above: import secp256k1 as ice
private_key_hex = "00000000000000000000000000000000000000000000000354d62e5f7a0d2eb2" dec = int(private_key_hex, 16) bitcoin_address = ice.privatekey_to_address(0, True, dec) print("Bitcoin Address:", bitcoin_address)
or: python3 -c "import secp256k1 as ice; private_key_hex = '00000000000000000000000000000000000000000000000354d62e5f7a0d2eb2'; dec = int(private_key_hex, 16); bitcoin_address = ice.privatekey_to_address(0, True, dec); print('Bitcoin Address:', bitcoin_address)"
Bitcoin Address: 13zb1hQbWVnN3ag9GNS2vCraT8PQJDjVdr But this method is not much faster for me. It all depends on how it is used.
|
|
|
Also for checkpoint.txt I just need to paste x coordinates on one line per key and save their private keys? What else do I need to change on the script?
I appreciate it.
Edit, I got it running, I just need to know what to change for addition and subtraction, should I put values in decimal? And why it won't show anything on screen? Lol it just blinks endlessly.
Here is how to tune script as per your needs: 1. xy.txt file must have x and y coordinates in decimal format with a single space between them, as I clarified earlier. 2. In checkpoints.txt file you don't need to save their private keys, why? that is whole point, because we keep starting 100 million or 1 billion pub keys' x coordinates which will work as 2 billions, so it is obvious that their private keys are from 2 to 1 billion or the last 1 billion. 3. There are 3 things that you can change, step size to be subtracted, number of steps, and number of iterations. all these are in numbers not in points. 4. Finally, why script was blinking, is because it was loading checkpoints.txt file, In my case I had 8 GB RAM with around 5.5 GB checkpoints.txt file, on a dual core system. It was taking around half an hour before printing steps.... Be patient, if no error occur, it will start printing within half an hour. I was also able to update the existing code to utilize almost all available CPU in your machine though leaving space for other activities and it's now 10 times faster... from multiprocessing import Pool, cpu_count
Pcurve = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 -1 # The proven prime N=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 # Number of points in the field Acurve = 0; Bcurve = 7 # These two defines the elliptic curve. y^2 = x^3 + Acurve * x + Bcurve Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240 Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424 GPoint = (Gx,Gy) # This is our generator point. Trillions of dif ones possible
def modinv(a, n=Pcurve):
    """Modular inverse of a mod n via the iterative extended Euclid."""
    x0, x1 = 0, 1
    r0, r1 = n, a % n
    while r1 > 1:
        q = r0 // r1
        x0, x1 = x1, x0 - q * x1
        r0, r1 = r1, r0 - q * r1
    return x1 % n
def ECadd(a, b):
    """Add two affine points on the curve; the string 'O' is the identity."""
    if a == 'O':
        return b
    if b == 'O':
        return a
    if a == b:
        # Doubling: tangent slope (3x^2 + a) / (2y).
        slope = ((3 * a[0] * a[0] + Acurve) * modinv(2 * a[1], Pcurve)) % Pcurve
    else:
        # Addition: chord slope (y2 - y1) / (x2 - x1).
        slope = ((b[1] - a[1]) * modinv(b[0] - a[0], Pcurve)) % Pcurve
    xr = (slope * slope - a[0] - b[0]) % Pcurve
    yr = (slope * (a[0] - xr) - a[1]) % Pcurve
    return (xr, yr)
def ECsub(a, b):
    """Return a - b, i.e. a + (-b); accepts "x y" strings as well as tuples."""
    if b == 'O':
        return a
    if isinstance(a, str):
        a = tuple(int(v) for v in a.split())
    if isinstance(b, str):
        b = tuple(int(v) for v in b.split())
    mirrored = (b[0], -b[1] % Pcurve)  # reflect b across the x-axis
    return ECadd(a, mirrored)
def ECmul(a, b):
    """Multiply point a by scalar b using right-to-left double-and-add."""
    total = 'O'
    while b > 0:
        b, bit = divmod(b, 2)
        if bit:
            total = ECadd(total, a)
        a = ECadd(a, a)
    return total
# --- Script inputs ---
# Read the target x, y coordinates (decimal, space separated) from xy.txt
with open("xy.txt", "r") as f:
    x, y = map(int, f.read().strip().split())
point = (x, y)

# Read the checkpoint x-coordinates from checkpoints.txt
with open("checkpoints.txt", "r") as f:
    checkpoints = set(map(int, f.read().strip().split()))

filename_out = "results.txt"

sub_count = 0

# Resume support: read the last value of j from file.  The original used a
# bare `except:` which also swallowed KeyboardInterrupt/SystemExit; only a
# missing or malformed file should fall back to 0.
try:
    with open("j_value.txt", "r") as f:
        last_j_value = int(f.readline())
except (FileNotFoundError, ValueError):
    last_j_value = 0
def process_iteration(args):
    """Worker: scan one stride of subtractions for a checkpoint hit.

    args = (j, last_j_value, point, checkpoints, filename_out).
    Returns True when point - sub_count*G lands on a checkpoint
    x-coordinate (the hit is written to filename_out), else False.
    """
    j, last_j_value, point, checkpoints, filename_out = args
    found_match = False
    STEP = 212676479325586539664609129644855  # fixed per-iteration subtraction step
    sub_count = 160000000 * j
    # The original iterated range(100001) and skipped k == 0 with an
    # `if k == 0: pass / else:` block; starting the range at 1 is the same
    # 100000 iterations without the dead branch.
    for k in range(1, 100001):
        sub_count += STEP
        candidate = ECsub(point, ECmul(GPoint, sub_count))
        print(sub_count)  # progress output, kept from the original
        if candidate[0] in checkpoints:
            with open(filename_out, "w") as f_out:
                subtractions = sub_count // STEP
                f_out.write("{} {} {}".format(candidate[0], candidate[1], subtractions))
            found_match = True
            break
    return found_match
def main():
    """Load inputs, fan process_iteration out over a process pool, report."""
    # Read the x, y coordinates from xy.txt
    with open("xy.txt", "r") as f:
        x, y = map(int, f.read().strip().split())
    point = (x, y)

    # Read the checkpoint x-coordinates from checkpoints.txt
    with open("checkpoints.txt", "r") as f:
        checkpoints = set(map(int, f.read().strip().split()))

    filename_out = "results.txt"

    # read the last value of j from file
    # NOTE(review): bare except also hides KeyboardInterrupt — consider narrowing.
    try:
        with open("j_value.txt", "r") as f:
            last_j_value = int(f.readline())
    except:
        last_j_value = 0

    # Determine the number of processes to use
    num_processes = min(cpu_count(), 8)  # You can adjust the number of processes

    # One task per j value; each worker re-receives the shared inputs.
    args_list = [(j, last_j_value, point, checkpoints, filename_out) for j in range(last_j_value, 10000001)]

    with Pool(processes=num_processes) as pool:
        results = pool.map(process_iteration, args_list)

    if any(results):
        print("Found match!")
    else:
        print("No match found.")

if __name__ == "__main__":
    main()
All we need now is the checkpoint generation techniques to have enough checkpoints for the code to run even faster and maximize RAM usage You can use ecmultiply_memo to store the results of previously computed point multiplications in the elliptic curve group to compute the multiplication of a point a by an integer b. Memoization helps optimize the code by storing the results of ECmul in the ecmultiply_memo dictionary for a given a and b pair. The montgomery_ladder function takes the scalar k and point P as inputs and returns the result of the point multiplication k * P. It uses a loop that processes each bit of the scalar k and combines point additions and doublings to compute the final result. This algorithm is more efficient than the simple double-and-add method . Also gmpy2 to perform modinv even faster. Something like this : from multiprocessing import Pool, cpu_count import gmpy2
# gmpy2-accelerated variant of the script above; same curve parameters.
Pcurve = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1  # The proven prime
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141  # Number of points in the field
Acurve = 0
Bcurve = 7  # These two define the elliptic curve. y^2 = x^3 + Acurve * x + Bcurve
Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
GPoint = (Gx, Gy)  # This is our generator point. Trillions of different ones possible

def modinv(a, n=Pcurve):
    """Modular inverse via gmpy2 (C-speed extended Euclid)."""
    return int(gmpy2.invert(a, n))

ecmultiply_memo = {}  # Memoization dictionary for ECmul

def ECadd(a, b):
    """Affine point addition/doubling; the string 'O' is the identity."""
    if a == 'O':
        return b
    if b == 'O':
        return a
    if a == b:
        # doubling: tangent slope
        LamAdd = ((3 * a[0] * a[0] + Acurve) * modinv(2 * a[1], Pcurve)) % Pcurve
    else:
        # addition: chord slope
        LamAdd = ((b[1] - a[1]) * modinv(b[0] - a[0], Pcurve)) % Pcurve
    x = (LamAdd * LamAdd - a[0] - b[0]) % Pcurve
    y = (LamAdd * (a[0] - x) - a[1]) % Pcurve
    return (x, y)

def ECsub(a, b):
    """Return a - b; accepts "x y" strings as well as tuples."""
    if b == 'O':
        return a
    if isinstance(a, str):
        a = tuple(map(int, a.split()))
    if isinstance(b, str):
        b = tuple(map(int, b.split()))
    neg_b = (b[0], -b[1] % Pcurve)
    return ECadd(a, neg_b)
def ECmul(a, b):
    """Multiply point a by scalar b (double-and-add), memoizing full results.

    Fixes two cache bugs in the original: the memo was keyed on the point
    alone, so a second call with the same point but a *different* scalar
    returned the first call's result; and the store used `a` after the
    loop had mutated it, so entries were filed under the wrong key.
    """
    key = (a, b)
    if key in ecmultiply_memo:
        return ecmultiply_memo[key]
    result = 'O'
    while b > 0:
        if b % 2 == 1:
            result = ECadd(result, a)
        a = ECadd(a, a)
        b = b // 2
    ecmultiply_memo[key] = result
    return result
def montgomery_ladder(k, P):
    """Compute k*P with the Montgomery ladder.

    The original scanned bits LSB-first and called ECmul(R0, R1) with two
    *points* (ECmul expects a scalar second argument), so it could not
    compute k*P.  The ladder must scan the scalar MSB-first, maintaining
    the invariant R1 == R0 + P with one add and one double per bit.
    """
    if k <= 0:
        return 'O'
    R0, R1 = 'O', P
    for i in range(k.bit_length() - 1, -1, -1):
        if (k >> i) & 1:
            R0, R1 = ECadd(R0, R1), ECadd(R1, R1)
        else:
            R0, R1 = ECadd(R0, R0), ECadd(R0, R1)
    return R0
def process_iteration(args):
    """Worker: scan one stride of subtractions for a checkpoint hit.

    args = (j, last_j_value, point, checkpoints, filename_out).
    Returns True when point - sub_count*G lands on a checkpoint
    x-coordinate (the hit is written to filename_out), else False.
    """
    j, last_j_value, point, checkpoints, filename_out = args
    found_match = False
    STEP = 212676479325586539664609129644855  # fixed per-iteration subtraction step
    sub_count = 160000000 * j
    # The original iterated range(100001) and skipped k == 0 with an
    # `if k == 0: pass / else:` block; range(1, 100001) is the same
    # 100000 iterations without the dead branch.
    for k in range(1, 100001):
        sub_count += STEP
        candidate = ECsub(point, montgomery_ladder(sub_count, GPoint))  # Use Montgomery ladder
        print(sub_count)  # progress output, kept from the original
        if candidate[0] in checkpoints:
            with open(filename_out, "w") as f_out:
                subtractions = sub_count // STEP
                f_out.write("{} {} {}".format(candidate[0], candidate[1], subtractions))
            found_match = True
            break
    return found_match
def main():
    """Load inputs, fan process_iteration out over a process pool, report."""
    # Read the x, y coordinates from xy.txt
    with open("xy.txt", "r") as f:
        x, y = map(int, f.read().strip().split())
    point = (x, y)

    # Read the checkpoint x-coordinates from checkpoints.txt
    with open("checkpoints.txt", "r") as f:
        checkpoints = set(map(int, f.read().strip().split()))

    filename_out = "results.txt"

    # read the last value of j from file
    # NOTE(review): bare except also hides KeyboardInterrupt — consider narrowing.
    try:
        with open("j_value.txt", "r") as f:
            last_j_value = int(f.readline())
    except:
        last_j_value = 0

    # Determine the number of processes to use
    num_processes = min(cpu_count(), 8)  # You can adjust the number of processes

    # One task per j value; each worker re-receives the shared inputs.
    args_list = [(j, last_j_value, point, checkpoints, filename_out) for j in range(last_j_value, 10000001)]

    with Pool(processes=num_processes) as pool:
        results = pool.map(process_iteration, args_list)

    if any(results):
        print("Found match!")
    else:
        print("No match found.")

if __name__ == "__main__":
    main()
While I may not be proficient in mathematical terms, I'm certainly willing to give it a try. This is just an example of how I imagine the calculations.Adjust to suit your needs. So far I have not been able to mathematically solve any Puzzle. It is unsolvable. There are no patterns or repetitions. Only brute force method can do something or pure luck of random generator. There are too many unknowns in the equation. And the technology we have is insufficient.
|
|
|
Also for checkpoint.txt I just need to paste x coordinates on one line per key and save their private keys? What else do I need to change on the script?
I appreciate it.
Edit, I got it running, I just need to know what to change for addition and subtraction, should I put values in decimal? And why it won't show anything on screen? Lol it just blinks endlessly.
Here is how to tune script as per your needs: 1. xy.txt file must have x and y coordinates in decimal format with a single space between them, as I clarified earlier. 2. In checkpoints.txt file you don't need to save their private keys, why? that is whole point, because we keep starting 100 million or 1 billion pub keys' x coordinates which will work as 2 billions, so it is obvious that their private keys are from 2 to 1 billion or the last 1 billion. 3. There are 3 things that you can change, step size to be subtracted, number of steps, and number of iterations. all these are in numbers not in points. 4. Finally, why script was blinking, is because it was loading checkpoints.txt file, In my case I had 8 GB RAM with around 5.5 GB checkpoints.txt file, on a dual core system. It was taking around half an hour before printing steps.... Be patient, if no error occur, it will start printing within half an hour. I was also able to update the existing code to utilize almost all available CPU in your machine though leaving space for other activities and it's now 10 times faster... from multiprocessing import Pool, cpu_count
# Duplicate of the plain-Python EC script posted earlier in the thread.
Pcurve = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1  # The proven prime
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141  # Number of points in the field
Acurve = 0; Bcurve = 7  # These two defines the elliptic curve. y^2 = x^3 + Acurve * x + Bcurve
Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
GPoint = (Gx, Gy)  # This is our generator point. Trillions of dif ones possible

def modinv(a, n=Pcurve):
    """Modular inverse of a mod n (iterative extended Euclid)."""
    lm, hm = 1, 0
    low, high = a % n, n
    while low > 1:
        ratio = high // low
        nm, new = hm - lm * ratio, high - low * ratio
        lm, low, hm, high = nm, new, lm, low
    return lm % n

def ECadd(a, b):
    """Affine point addition/doubling; the string 'O' is the identity."""
    if a == 'O':
        return b
    if b == 'O':
        return a
    if a == b:
        LamAdd = ((3 * a[0] * a[0] + Acurve) * modinv(2 * a[1], Pcurve)) % Pcurve
    else:
        LamAdd = ((b[1] - a[1]) * modinv(b[0] - a[0], Pcurve)) % Pcurve
    x = (LamAdd * LamAdd - a[0] - b[0]) % Pcurve
    y = (LamAdd * (a[0] - x) - a[1]) % Pcurve
    return (x, y)

def ECsub(a, b):
    """Return a - b; accepts "x y" strings as well as tuples."""
    if b == 'O':
        return a
    if isinstance(a, str):
        a = tuple(map(int, a.split()))
    if isinstance(b, str):
        b = tuple(map(int, b.split()))
    neg_b = (b[0], -b[1] % Pcurve)
    return ECadd(a, neg_b)

def ECmul(a, b):
    """Multiply point a by scalar b (right-to-left double-and-add)."""
    result = 'O'
    while b > 0:
        if b % 2 == 1:
            result = ECadd(result, a)
        a = ECadd(a, a)
        b = b // 2
    return result
# --- Script inputs ---
# Read the target x, y coordinates (decimal, space separated) from xy.txt
with open("xy.txt", "r") as f:
    x, y = map(int, f.read().strip().split())
point = (x, y)

# Read the checkpoint x-coordinates from checkpoints.txt
with open("checkpoints.txt", "r") as f:
    checkpoints = set(map(int, f.read().strip().split()))

filename_out = "results.txt"

sub_count = 0

# Resume support: read the last value of j from file.  The original used a
# bare `except:` which also swallowed KeyboardInterrupt/SystemExit; only a
# missing or malformed file should fall back to 0.
try:
    with open("j_value.txt", "r") as f:
        last_j_value = int(f.readline())
except (FileNotFoundError, ValueError):
    last_j_value = 0
def process_iteration(args):
    """Worker: scan one stride of subtractions for a checkpoint hit.

    args = (j, last_j_value, point, checkpoints, filename_out).
    Returns True when point - sub_count*G lands on a checkpoint
    x-coordinate (the hit is written to filename_out), else False.
    """
    j, last_j_value, point, checkpoints, filename_out = args
    found_match = False
    STEP = 212676479325586539664609129644855  # fixed per-iteration subtraction step
    sub_count = 160000000 * j
    # The original iterated range(100001) and skipped k == 0 with an
    # `if k == 0: pass / else:` block; range(1, 100001) is the same
    # 100000 iterations without the dead branch.
    for k in range(1, 100001):
        sub_count += STEP
        candidate = ECsub(point, ECmul(GPoint, sub_count))
        print(sub_count)  # progress output, kept from the original
        if candidate[0] in checkpoints:
            with open(filename_out, "w") as f_out:
                subtractions = sub_count // STEP
                f_out.write("{} {} {}".format(candidate[0], candidate[1], subtractions))
            found_match = True
            break
    return found_match
def main():
    """Load inputs, fan process_iteration out over a process pool, report."""
    # Read the x, y coordinates from xy.txt
    with open("xy.txt", "r") as f:
        x, y = map(int, f.read().strip().split())
    point = (x, y)

    # Read the checkpoint x-coordinates from checkpoints.txt
    with open("checkpoints.txt", "r") as f:
        checkpoints = set(map(int, f.read().strip().split()))

    filename_out = "results.txt"

    # read the last value of j from file
    # NOTE(review): bare except also hides KeyboardInterrupt — consider narrowing.
    try:
        with open("j_value.txt", "r") as f:
            last_j_value = int(f.readline())
    except:
        last_j_value = 0

    # Determine the number of processes to use
    num_processes = min(cpu_count(), 8)  # You can adjust the number of processes

    # One task per j value; each worker re-receives the shared inputs.
    args_list = [(j, last_j_value, point, checkpoints, filename_out) for j in range(last_j_value, 10000001)]

    with Pool(processes=num_processes) as pool:
        results = pool.map(process_iteration, args_list)

    if any(results):
        print("Found match!")
    else:
        print("No match found.")

if __name__ == "__main__":
    main()
All we need now is the checkpoint generation techniques to have enough checkpoints for the code to run even faster and maximize RAM usage You can use ecmultiply_memo to store the results of previously computed point multiplications in the elliptic curve group to compute the multiplication of a point a by an integer b. Memoization helps optimize the code by storing the results of ECmul in the ecmultiply_memo dictionary for a given a and b pair. The montgomery_ladder function takes the scalar k and point P as inputs and returns the result of the point multiplication k * P. It uses a loop that processes each bit of the scalar k and combines point additions and doublings to compute the final result. This algorithm is more efficient than the simple double-and-add method . Also gmpy2 to perform modinv even faster. Something like this : from multiprocessing import Pool, cpu_count import gmpy2
# Duplicate of the gmpy2-accelerated variant posted earlier in the thread.
Pcurve = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1  # The proven prime
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141  # Number of points in the field
Acurve = 0
Bcurve = 7  # These two define the elliptic curve. y^2 = x^3 + Acurve * x + Bcurve
Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
GPoint = (Gx, Gy)  # This is our generator point. Trillions of different ones possible

def modinv(a, n=Pcurve):
    """Modular inverse via gmpy2 (C-speed extended Euclid)."""
    return int(gmpy2.invert(a, n))

ecmultiply_memo = {}  # Memoization dictionary for ECmul

def ECadd(a, b):
    """Affine point addition/doubling; the string 'O' is the identity."""
    if a == 'O':
        return b
    if b == 'O':
        return a
    if a == b:
        LamAdd = ((3 * a[0] * a[0] + Acurve) * modinv(2 * a[1], Pcurve)) % Pcurve
    else:
        LamAdd = ((b[1] - a[1]) * modinv(b[0] - a[0], Pcurve)) % Pcurve
    x = (LamAdd * LamAdd - a[0] - b[0]) % Pcurve
    y = (LamAdd * (a[0] - x) - a[1]) % Pcurve
    return (x, y)

def ECsub(a, b):
    """Return a - b; accepts "x y" strings as well as tuples."""
    if b == 'O':
        return a
    if isinstance(a, str):
        a = tuple(map(int, a.split()))
    if isinstance(b, str):
        b = tuple(map(int, b.split()))
    neg_b = (b[0], -b[1] % Pcurve)
    return ECadd(a, neg_b)
def ECmul(a, b):
    """Multiply point a by scalar b (double-and-add), memoizing full results.

    Fixes two cache bugs in the original: the memo was keyed on the point
    alone, so a second call with the same point but a *different* scalar
    returned the first call's result; and the store used `a` after the
    loop had mutated it, so entries were filed under the wrong key.
    """
    key = (a, b)
    if key in ecmultiply_memo:
        return ecmultiply_memo[key]
    result = 'O'
    while b > 0:
        if b % 2 == 1:
            result = ECadd(result, a)
        a = ECadd(a, a)
        b = b // 2
    ecmultiply_memo[key] = result
    return result
def montgomery_ladder(k, P):
    """Compute k*P with the Montgomery ladder.

    The original scanned bits LSB-first and called ECmul(R0, R1) with two
    *points* (ECmul expects a scalar second argument), so it could not
    compute k*P.  The ladder must scan the scalar MSB-first, maintaining
    the invariant R1 == R0 + P with one add and one double per bit.
    """
    if k <= 0:
        return 'O'
    R0, R1 = 'O', P
    for i in range(k.bit_length() - 1, -1, -1):
        if (k >> i) & 1:
            R0, R1 = ECadd(R0, R1), ECadd(R1, R1)
        else:
            R0, R1 = ECadd(R0, R0), ECadd(R0, R1)
    return R0
def process_iteration(args):
    """Worker: scan one stride of subtractions for a checkpoint hit.

    args = (j, last_j_value, point, checkpoints, filename_out).
    Returns True when point - sub_count*G lands on a checkpoint
    x-coordinate (the hit is written to filename_out), else False.
    """
    j, last_j_value, point, checkpoints, filename_out = args
    found_match = False
    STEP = 212676479325586539664609129644855  # fixed per-iteration subtraction step
    sub_count = 160000000 * j
    # The original iterated range(100001) and skipped k == 0 with an
    # `if k == 0: pass / else:` block; range(1, 100001) is the same
    # 100000 iterations without the dead branch.
    for k in range(1, 100001):
        sub_count += STEP
        candidate = ECsub(point, montgomery_ladder(sub_count, GPoint))  # Use Montgomery ladder
        print(sub_count)  # progress output, kept from the original
        if candidate[0] in checkpoints:
            with open(filename_out, "w") as f_out:
                subtractions = sub_count // STEP
                f_out.write("{} {} {}".format(candidate[0], candidate[1], subtractions))
            found_match = True
            break
    return found_match
def main():
    """Load inputs, fan process_iteration out over a process pool, report."""
    # Read the x, y coordinates from xy.txt
    with open("xy.txt", "r") as f:
        x, y = map(int, f.read().strip().split())
    point = (x, y)

    # Read the checkpoint x-coordinates from checkpoints.txt
    with open("checkpoints.txt", "r") as f:
        checkpoints = set(map(int, f.read().strip().split()))

    filename_out = "results.txt"

    # read the last value of j from file
    # NOTE(review): bare except also hides KeyboardInterrupt — consider narrowing.
    try:
        with open("j_value.txt", "r") as f:
            last_j_value = int(f.readline())
    except:
        last_j_value = 0

    # Determine the number of processes to use
    num_processes = min(cpu_count(), 8)  # You can adjust the number of processes

    # One task per j value; each worker re-receives the shared inputs.
    args_list = [(j, last_j_value, point, checkpoints, filename_out) for j in range(last_j_value, 10000001)]

    with Pool(processes=num_processes) as pool:
        results = pool.map(process_iteration, args_list)

    if any(results):
        print("Found match!")
    else:
        print("No match found.")

if __name__ == "__main__":
    main()
|
|
|
To give you some perspective, with current technology, brute forcing a 20-character password with a mix of upper case letters, numbers, could take an incredibly long time, likely beyond the lifetime of the universe. No luck there at all. ![Grin](https://bitcointalk.org/Smileys/default/grin.gif)
|
|
|
import itertools import sys import string import datetime import scrypt import threading from binascii import unhexlify from Crypto.Cipher import AES from simplebitcoinfuncs import normalize_input, b58d, hexstrlify, dechex, privtopub, compress, pubtoaddress, b58e, multiplypriv from simplebitcoinfuncs.ecmath import N from simplebitcoinfuncs.hexhashes import hash256
def simple_aes_decrypt(msg, key):
    """AES-256-ECB decrypt a single 16-byte block.

    After decrypting, trailing '7b' (the '{' byte) hex pairs are stripped
    and then the string is re-padded with '7b' back to 32 hex chars.
    NOTE(review): stripping N trailing '7b' pairs and then appending
    exactly enough '7b' pairs to reach 32 chars looks like a no-op on the
    hex string — confirm the intended padding behavior.
    """
    assert len(msg) == 16
    assert len(key) == 32
    cipher = AES.new(key, AES.MODE_ECB)
    msg = hexstrlify(cipher.decrypt(msg))
    while msg[-2:] == '7b':  # Can't use rstrip for multiple chars
        msg = msg[:-2]
    for i in range((32 - len(msg)) // 2):
        msg = msg + '7b'
    assert len(msg) == 32
    return unhexlify(msg)
def bip38decrypt(password, encpriv, outputlotsequence=False):
    """Attempt to decrypt a BIP-38 encrypted private key with `password`.

    encpriv is the Base58 string; prefix '0142' is the non-EC-multiplied
    form, '0143' the EC-multiplied form.  Returns the WIF private key on
    success, False on wrong password / invalid result.  With
    outputlotsequence=True returns a (priv, lot, sequence) triple instead
    (False, False, False on failure).  Relies on the module-level
    COMPRESSION_FLAGBYTES / LOTSEQUENCE_FLAGBYTES / N constants.
    """
    password = normalize_input(password, False, True)
    encpriv = b58d(encpriv)
    assert len(encpriv) == 78
    prefix = encpriv[:4]
    assert prefix == '0142' or prefix == '0143'
    flagbyte = encpriv[4:6]
    if prefix == '0142':
        # --- Non-EC-multiplied key: single scrypt + two AES blocks ---
        salt = unhexlify(encpriv[6:14])
        msg1 = unhexlify(encpriv[14:46])
        msg2 = unhexlify(encpriv[46:])
        scrypthash = hexstrlify(scrypt.hash(password, salt, 16384, 8, 8, 64))
        key = unhexlify(scrypthash[64:])
        msg1 = hexstrlify(simple_aes_decrypt(msg1, key))
        msg2 = hexstrlify(simple_aes_decrypt(msg2, key))
        # XOR each decrypted half with the first 64 hex chars of the scrypt hash.
        half1 = int(msg1, 16) ^ int(scrypthash[:32], 16)
        half2 = int(msg2, 16) ^ int(scrypthash[32:64], 16)
        priv = dechex(half1, 16) + dechex(half2, 16)
        if int(priv, 16) == 0 or int(priv, 16) >= N:
            if outputlotsequence:
                return False, False, False
            else:
                return False
        pub = privtopub(priv, False)
        if flagbyte in COMPRESSION_FLAGBYTES:
            privcompress = '01'
            pub = compress(pub)
        else:
            privcompress = ''
        address = pubtoaddress(pub, '00')
        try:
            addrhex = hexstrlify(address)
        except:
            addrhex = hexstrlify(bytearray(address, 'ascii'))
        # The stored 4-byte address hash doubles as the password check.
        addresshash = hash256(addrhex)[:8]
        if addresshash == encpriv[6:14]:
            priv = b58e('80' + priv + privcompress)
            if outputlotsequence:
                return priv, False, False
            else:
                return priv
        else:
            if outputlotsequence:
                return False, False, False
            else:
                return False
    else:
        # --- EC-multiplied key ('0143'): passfactor * factorb reconstruction ---
        owner_entropy = encpriv[14:30]
        enchalf1half1 = encpriv[30:46]
        enchalf2 = encpriv[46:]
        if flagbyte in LOTSEQUENCE_FLAGBYTES:
            lotsequence = owner_entropy[8:]
            owner_salt = owner_entropy[:8]
        else:
            lotsequence = False
            owner_salt = owner_entropy
        salt = unhexlify(owner_salt)
        prefactor = hexstrlify(scrypt.hash(password, salt, 16384, 8, 8, 32))
        if lotsequence is False:
            passfactor = prefactor
        else:
            passfactor = hash256(prefactor + owner_entropy)
        if int(passfactor, 16) == 0 or int(passfactor, 16) >= N:
            if outputlotsequence:
                return False, False, False
            else:
                return False
        passpoint = privtopub(passfactor, True)
        # Second scrypt pass is keyed by the passpoint, salted with
        # addresshash + owner_entropy.
        password = unhexlify(passpoint)
        salt = unhexlify(encpriv[6:14] + owner_entropy)
        encseedb = hexstrlify(scrypt.hash(password, salt, 1024, 1, 1, 64))
        key = unhexlify(encseedb[64:])
        # Unwind the two-level AES encryption of seedb.
        tmp = hexstrlify(simple_aes_decrypt(unhexlify(enchalf2), key))
        enchalf1half2_seedblastthird = int(tmp, 16) ^ int(encseedb[32:64], 16)
        enchalf1half2_seedblastthird = dechex(enchalf1half2_seedblastthird, 16)
        enchalf1half2 = enchalf1half2_seedblastthird[:16]
        enchalf1 = enchalf1half1 + enchalf1half2
        seedb = hexstrlify(simple_aes_decrypt(unhexlify(enchalf1), key))
        seedb = int(seedb, 16) ^ int(encseedb[:32], 16)
        seedb = dechex(seedb, 16) + enchalf1half2_seedblastthird[16:]
        assert len(seedb) == 48  # I want to except for this and be alerted to it
        try:
            factorb = hash256(seedb)
            assert int(factorb, 16) != 0
            assert not int(factorb, 16) >= N
        except:
            if outputlotsequence:
                return False, False, False
            else:
                return False
        priv = multiplypriv(passfactor, factorb)
        pub = privtopub(priv, False)
        if flagbyte in COMPRESSION_FLAGBYTES:
            privcompress = '01'
            pub = compress(pub)
        else:
            privcompress = ''
        address = pubtoaddress(pub, '00')
        try:
            addrhex = hexstrlify(address)
        except:
            addrhex = hexstrlify(bytearray(address, 'ascii'))
        addresshash = hash256(addrhex)[:8]
        if addresshash == encpriv[6:14]:
            priv = b58e('80' + priv + privcompress)
            if outputlotsequence:
                if lotsequence is not False:
                    # lot/sequence packed as lot*4096 + sequence.
                    lotsequence = int(lotsequence, 16)
                    sequence = lotsequence % 4096
                    lot = (lotsequence - sequence) // 4096
                    return priv, lot, sequence
                else:
                    return priv, False, False
            else:
                return priv
        else:
            if outputlotsequence:
                return False, False, False
            else:
                return False
def testPassword(pwd):
    """Worker-thread body: try one candidate passphrase against the BIP-38 key.

    Prints a banner and sets the global stop flag when the passphrase
    decrypts the key; always releases the thread-count semaphore on exit.
    """
    try:
        if bip38decrypt(pwd, encryptedSecret) != False:
            banner_width = 22 + len(pwd)
            print("\n\n" + "#" * banner_width
                  + "\n## PASSWORD FOUND: {pwd} ##\n".format(pwd=pwd)
                  + "#" * banner_width + "\n")
            global flag
            flag = 1
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and made the workers impossible to interrupt.
        # Decrypt failures for wrong passphrases are still deliberately ignored.
        pass
    finally:
        td.release()  # free a slot so the producer loop can start the next thread
if __name__ == '__main__':
    # BIP-38 flag bytes marking compressed keys and lot/sequence-encoded keys.
    COMPRESSION_FLAGBYTES = ['20', '24', '28', '2c', '30', '34', '38', '3c', 'e0', 'e8', 'f0', 'f8']
    LOTSEQUENCE_FLAGBYTES = ['04', '0c', '14', '1c', '24', '2c', '34', '3c']
    encryptedSecret = "6PnQmAyBky9ZXJyZBv9QSGRUXkKh9HfnVsZWPn4YtcwoKy5vufUgfA3Ld7"
    threadNum = 32  # upper bound on concurrently running worker threads
    pwdCharacters = string.ascii_uppercase + string.digits
    maxCombination = 20  # raw candidate length before dashes are inserted
    maxLength = 23
    positions = [4, 9, 14, 19]  # dash insertion points: XXXX-XXXX-XXXX-XXXX-XXXX
    td = threading.BoundedSemaphore(int(threadNum))
    threadlist = []
    print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    num = 0
    flag = 0  # set to 1 by testPassword when the passphrase is found
    # NOTE(review): iterating 36**20 combinations sequentially is infeasible
    # to exhaust; this loop is best-effort brute force.
    for pwd in itertools.product(pwdCharacters, repeat=maxCombination):
        if flag == 1:
            break
        password = "".join(pwd)
        if len(password) <= int(maxLength):
            formatted_pwd = list(password)
            for pos in positions:
                formatted_pwd.insert(pos, '-')
            formatted_pwd = ''.join(formatted_pwd)
            num += 1
            msg = 'Test Password {num} , {password}'.format(num=num, password=formatted_pwd)
            sys.stdout.write('\r' + msg)
            sys.stdout.flush()
            td.acquire()  # blocks until a worker slot is free (throttles thread count)
            t = threading.Thread(target=testPassword, args=(formatted_pwd,))
            t.start()
            threadlist.append(t)
    for x in threadlist:
        x.join()
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

If you're interested in finding the password for challenge #1, I'll leave a simple piece of code here. It searches the 36^20 combinations sequentially — good luck. Note that pybip38 has not been updated in over 8 years; you can speed this script up roughly tenfold by adding gmpy2.
|
|
|
How about percentage ?? import time import random import gmpy2 from functools import lru_cache import multiprocessing from tqdm import tqdm
# secp256k1 domain parameters: field prime, group order, generator (Gx, Gy).
modulo = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
Gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8


class Point:
    """Mutable affine point on secp256k1; Point(0, 0) marks the point at infinity."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __repr__(self):
        # Added for debuggability only; no existing caller relies on repr().
        return "Point(%#x, %#x)" % (self.x, self.y)


PG = Point(Gx, Gy)
Z = Point(0, 0)  # zero-point, infinite in real x,y-plane
def mul2(P, p=modulo):
    """Double the point P on secp256k1 using the affine tangent rule."""
    # Slope of the tangent at P: (3*x^2) / (2*y) mod p.
    lam = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    doubled = Point()
    doubled.x = (lam * lam - 2 * P.x) % p
    doubled.y = (lam * (P.x - doubled.x) - P.y) % p
    return doubled
def add(P, Q, p=modulo):
    """Add two distinct points P and Q on secp256k1 (affine chord rule)."""
    # Chord slope: (Qy - Py) / (Qx - Px) mod p.
    lam = (Q.y - P.y) * gmpy2.invert(Q.x - P.x, p) % p
    total = Point()
    total.x = (lam * lam - P.x - Q.x) % p
    total.y = (lam * (P.x - total.x) - P.y) % p
    return total
@lru_cache(maxsize=None)
def X2Y(X, p=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F):
    """Return one square root y of x^3 + 7 mod p (a curve y for x-coordinate X).

    The default p is the secp256k1 field prime (same value as the module's
    `modulo`). Requires p ≡ 3 (mod 4); otherwise prints a message and
    returns 0, as the original did.
    """
    if p % 4 != 3:
        print('prime must be 3 modulo 4')
        return 0
    # Fix/improvement: for p ≡ 3 (mod 4) a square root is a**((p+1)//4) mod p.
    # The original unrolled this as a 256-iteration square-and-multiply loop;
    # the built-in three-argument pow computes the identical value in C.
    return pow((X ** 3 + 7) % p, (p + 1) // 4, p)
def compute_P_table():
    """Return [G, 2G, 4G, ..., 2^255*G]: the hop-size points indexed by exponent."""
    table = [PG]
    while len(table) < 256:
        table.append(mul2(table[-1]))
    return table


P = compute_P_table()

print('P-table prepared')
def comparator(A, Ak, B, Bk):
    """Look for a shared trap x-coordinate between the tame (A) and wild (B) lists.

    On a collision the private-key candidate is the difference of the matching
    scalar offsets; it is printed and appended to results.txt.  Returns True
    iff a collision was found.
    """
    overlap = set(A) & set(B)
    if not overlap:
        return False
    hit = next(iter(overlap))
    d = Ak[A.index(hit)] - Bk[B.index(hit)]
    print('SOLVED:', d)
    with open("results.txt", 'a') as out:
        out.write('%d' % d + "\n")
        out.write("---------------\n")
    return True
def check(P, Pindex, DP_rarity, file2save, A, Ak, B, Bk):
    """Trap P if it is a distinguished point (x divisible by DP_rarity).

    Distinguished points are appended to the in-memory lists and to
    file2save, then the two herds are compared for a collision.
    Returns the comparator verdict, or False for ordinary points.
    """
    if P.x % DP_rarity != 0:
        return False
    A.append(P.x)
    Ak.append(Pindex)
    with open(file2save, 'a') as out:
        out.write('%064x %d' % (P.x, Pindex) + "\n")
    return comparator(A, Ak, B, Bk)
def mulk(k, P=PG, p=modulo):
    """Scalar multiplication: return k*P via recursive double-and-add.

    Fix: the odd branch now forwards the modulus p to add(); the original
    silently fell back to add()'s default modulus, which is wrong for any
    p != modulo (the single-core variant of this script already passes p
    here).  Behavior is unchanged for all existing callers, which use the
    default modulus.
    """
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        return mulk(k // 2, mul2(P, p), p)
    else:
        return add(P, mulk((k - 1) // 2, mul2(P, p), p), p)
def search(process_id, Nt, Nw, problem, kangoo_power, starttime):
    """One worker process: hop Nt tame and Nw wild kangaroos until a collision."""
    # Distinguished-point rarity and hop-size modulus derived from the
    # interval width (standard Pollard-kangaroo heuristics).
    DP_rarity = 1 << ((problem - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((problem - 1) // 2) + kangoo_power
    T, t, dt = [], [], []
    W, w, dw = [], [], []
    # Tame herd: known scalars near the top of the range.
    for k in range(Nt):
        t.append((3 << (problem - 2)) + random.randint(1, (1 << (problem - 1))))
        T.append(mulk(t[k]))
        dt.append(0)
    # Wild herd: the target point W0 offset by known random scalars.
    for k in range(Nw):
        w.append(random.randint(1, (1 << (problem - 1))))
        W.append(add(W0, mulk(w[k])))
        dw.append(0)
    print('tame and wild herds are prepared')
    oldtime = time.time()
    Hops, Hops_old = 0, 0
    t0 = time.time()
    starttime = oldtime = time.time()
    # Expected-iteration estimates sizing the progress bars only; the walk
    # may run longer, in which case the bars overflow their totals.
    total_tame_iterations = Nt * (problem - 2 * kangoo_power - 2)
    total_wild_iterations = Nw * (problem - 2 * kangoo_power - 2)
    total_iterations = total_tame_iterations + total_wild_iterations
    pbar_t = tqdm(total=total_tame_iterations, desc=f"Process {process_id}: tame", position=process_id, leave=False, ncols=50, bar_format="{desc:<0}|{bar} | {percentage:3.2f}% | ")
    pbar_w = tqdm(total=total_wild_iterations, desc=f"Process {process_id}: wild", position=process_id, leave=False, ncols=50, bar_format="{desc:<0}|{bar} | {percentage:3.2f}% | ")
    while True:
        for k in range(Nt):
            Hops += 1
            pw = T[k].x % hop_modulo  # exponent of this hop's size
            dt[k] = 1 << pw
            solved = check(T[k], t[k], DP_rarity, "tame.txt", T, t, W, w)
            if solved:
                pbar_t.close()
                pbar_w.close()
                elapsed_time_t = time.time() - starttime
                percentage_completed = (Hops / total_iterations) * 100
                # NOTE(review): '% 100' wraps the displayed percentage when the
                # hop count exceeds the estimate (values above 100% fold over).
                print(f'Process {process_id}: tame completed: %.2f%%' % (percentage_completed % 100))
                return f'Process {process_id}: tame sol. time: %.2f sec' % elapsed_time_t
            t[k] += dt[k]
            T[k] = add(P[pw], T[k])  # hop by the power-of-two point P[pw]
            pbar_t.update(1)
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            dw[k] = 1 << pw
            solved = check(W[k], w[k], DP_rarity, "wild.txt", W, w, T, t)
            if solved:
                pbar_t.close()
                pbar_w.close()
                elapsed_time_w = time.time() - starttime
                percentage_completed = (Hops / total_iterations) * 100
                print(f'Process {process_id}: wild completed: %.2f%%' % (percentage_completed % 100))
                return f'Process {process_id}: wild sol. time: %.2f sec' % elapsed_time_w
            w[k] += dw[k]
            W[k] = add(P[pw], W[k])
            pbar_w.update(1)
def search_wrapper(args):
    # Pool.imap delivers a single object per task; unpack the argument tuple.
    return search(*args)


if __name__ == "__main__":
    # Puzzle #32: the private key lies in [2^31, 2^32 - 1].
    start = 2147483647
    end = 4294967295
    search_range = end - start + 1
    problem = search_range.bit_length()  # interval width in bits

    compressed_public_key = "0209c58240e50e3ba3f833c82655e8725c037a2294e14cf5d73a5df8d56159de69"  #Puzzle 32
    kangoo_power = 3
    Nt = Nw = 2 ** kangoo_power  # kangaroos per herd

    # Recover the full point W0 from the compressed key: the low 256 bits are
    # x, the leading byte (02/03) carries the parity of y.
    X = int(compressed_public_key, 16)
    Y = X2Y(X % (2 ** 256))
    if Y % 2 != (X >> 256) % 2:
        Y = modulo - Y
    X = X % (2 ** 256)
    W0 = Point(X, Y)  # public point whose discrete log we seek
    starttime = oldtime = time.time()

    num_cpus = multiprocessing.cpu_count()
    N_tests = num_cpus  # Use the number of CPU cores as the number of tests

    args = [(i, Nt, Nw, problem, kangoo_power, starttime) for i in range(N_tests)]

    # NOTE(review): the workers read W0 as a module global set inside this
    # guard; that relies on fork-style process start — confirm before running
    # on platforms that use spawn.
    with multiprocessing.Pool(processes=N_tests) as pool:
        results = list(tqdm(pool.imap(search_wrapper, args), total=N_tests, bar_format="{desc:<0}|{bar} | {percentage:3.2f}% | ", ncols=50))

    total_time = sum(float(result.split(': ')[-1][:-4]) for result in results if result is not None)
    print('Average time to solve: %.2f sec' % (total_time / N_tests))
Result: P-table prepared | | 0.00% | tame and wild herds are prepared Process 0: wild| | 0.00% | tame and wild herds are prepared tame and wild herds are prepared tame and wild herds are prepared Process 0: wild| | 0.00% | SOLVED: -3093472814 Process 0: wild completed: 61.46% |█████████▎ | 25.00% | SOLVED: -3093472814 Process 1: wild| | 0.00% | Process 2: wild completed: 28.39% Process 2: wild| | 0.00% | SOLVED: 3093472814 Process 1: tame| | 0.00% | Process 1: tame completed: 34.64% |██████████████████▌ | 50.00% | SOLVED: -3093472814 Process 3: wild| | 0.00% | Process 3: wild completed: 19.01% |████████████████████████████████████ | 100.00% | Average time to solve: 1.53 sec | 0.00% |
![Grin](https://bitcointalk.org/Smileys/default/grin.gif)
|
|
|
Is there a multicore version available, or can we edit this one to do that? Thanks, mate, for sharing.
Sure, here's the full script with multiprocessing added: import time import random import gmpy2 from functools import lru_cache import multiprocessing
# secp256k1 field prime, group order, and generator coordinates.
modulo = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
Gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8


class Point:
    # Mutable affine curve point.
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y


PG = Point(Gx, Gy)
Z = Point(0, 0)  # zero-point, infinite in real x,y-plane


def mul2(P, p=modulo):
    """Double point P (affine tangent rule)."""
    c = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    R = Point()
    R.x = (c * c - 2 * P.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


def add(P, Q, p=modulo):
    """Add distinct points P and Q (affine chord rule)."""
    dx = Q.x - P.x
    dy = Q.y - P.y
    c = dy * gmpy2.invert(dx, p) % p
    R = Point()
    R.x = (c * c - P.x - Q.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


@lru_cache(maxsize=None)
def X2Y(X, p=modulo):
    """Lift x to one square root of x^3 + 7 mod p (requires p ≡ 3 mod 4)."""
    if p % 4 != 3:
        print('prime must be 3 modulo 4')
        return 0
    X = (X ** 3 + 7) % p
    pw = (p + 1) // 4
    Y = 1
    tmp = X
    # Square-and-multiply: Y = X**pw mod p.
    for w in range(256):
        if (pw >> w) & 1 == 1:
            Y = (Y * tmp) % p
        tmp = pow(tmp, 2, p)
    return Y


def compute_P_table():
    # P[k] = 2^k * G, the hop-size points.
    P = [PG]
    for k in range(255):
        P.append(mul2(P[k]))
    return P


P = compute_P_table()

print('P-table prepared')


def comparator(A, Ak, B, Bk):
    """Return True (and log the recovered key) when tame/wild trap lists collide."""
    result = set(A).intersection(set(B))
    if result:
        sol_kt = A.index(next(iter(result)))
        sol_kw = B.index(next(iter(result)))
        # 'starttime' is a module-level global set before the pool starts.
        print('total time: %.2f sec' % (time.time() - starttime))
        d = Ak[sol_kt] - Bk[sol_kw]
        print('SOLVED:', d)
        with open("results.txt", 'a') as file:
            file.write(('%d' % (Ak[sol_kt] - Bk[sol_kw])) + "\n")
            file.write("---------------\n")
        return True
    else:
        return False


def check(P, Pindex, DP_rarity, file2save, A, Ak, B, Bk):
    # Trap only "distinguished" points (x divisible by DP_rarity), then compare herds.
    if P.x % DP_rarity == 0:
        A.append(P.x)
        Ak.append(Pindex)
        with open(file2save, 'a') as file:
            file.write(('%064x %d' % (P.x, Pindex)) + "\n")
        return comparator(A, Ak, B, Bk)
    else:
        return False


def mulk(k, P=PG, p=modulo):
    # Recursive double-and-add scalar multiplication k*P.
    # NOTE(review): the odd branch lets add() fall back to its default
    # modulus instead of p — harmless while p == modulo.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        return mulk(k // 2, mul2(P, p), p)
    else:
        return add(P, mulk((k - 1) // 2, mul2(P, p), p))


def search(process_id, Nt, Nw, problem, kangoo_power, starttime):
    """One worker: run Nt tame and Nw wild kangaroos until a collision solves the key."""
    DP_rarity = 1 << ((problem - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((problem - 1) // 2) + kangoo_power
    T, t, dt = [], [], []
    W, w, dw = [], [], []
    # Tame herd starts at known scalars near the top of the range.
    for k in range(Nt):
        t.append((3 << (problem - 2)) + random.randint(1, (1 << (problem - 1))))
        T.append(mulk(t[k]))
        dt.append(0)
    # Wild herd starts at the target point W0 plus known random offsets.
    for k in range(Nw):
        w.append(random.randint(1, (1 << (problem - 1))))
        W.append(add(W0, mulk(w[k])))
        dw.append(0)
    print('tame and wild herds are prepared')
    oldtime = time.time()
    Hops, Hops_old = 0, 0
    t0 = time.time()
    oldtime = time.time()
    starttime = oldtime
    while True:
        for k in range(Nt):
            Hops += 1
            pw = T[k].x % hop_modulo  # exponent of this hop's size
            dt[k] = 1 << pw
            solved = check(T[k], t[k], DP_rarity, "tame.txt", T, t, W, w)
            if solved:
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            t[k] += dt[k]
            T[k] = add(P[pw], T[k])
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            dw[k] = 1 << pw
            solved = check(W[k], w[k], DP_rarity, "wild.txt", W, w, T, t)
            if solved:
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            w[k] += dw[k]
            W[k] = add(P[pw], W[k])
        t1 = time.time()
        # Hop-rate report roughly every 5 seconds.
        if (t1 - t0) > 5:
            print('%.3f h/s' % ((Hops - Hops_old) / (t1 - t0)))
            t0 = t1
            Hops_old = Hops


# Puzzle #32: key lies in [2^31, 2^32 - 1].
start = 2147483647
end = 4294967295
search_range = end - start + 1
problem = search_range.bit_length()

compreessed_public_key = "0209c58240e50e3ba3f833c82655e8725c037a2294e14cf5d73a5df8d56159de69"  #Puzzle 32
kangoo_power = 3
Nt = Nw = 2 ** kangoo_power

# Recover the target point W0 from the compressed public key (low 256 bits
# are x; the 02/03 prefix byte carries the parity of y).
X = int(compreessed_public_key, 16)
Y = X2Y(X % (2 ** 256))
if Y % 2 != (X >> 256) % 2:
    Y = modulo - Y
X = X % (2 ** 256)
W0 = Point(X, Y)
starttime = oldtime = time.time()

Hops = 0
random.seed()

# NOTE(review): hops_list is never appended to, so the statistics block
# below never executes.
hops_list = []
N_tests = 3


def search_wrapper(process_id):
    # Adapter so Pool.map can pass a single process id.
    return search(process_id, Nt, Nw, problem, kangoo_power, starttime)


if __name__ == "__main__":
    num_cpus = multiprocessing.cpu_count()
    N_tests = num_cpus  # Use the number of CPU cores as the number of tests
    with multiprocessing.Pool(processes=N_tests) as pool:
        results = pool.map(search_wrapper, range(N_tests))
    for result in results:
        print(result)
    M, D = 0, 0
    if len(hops_list) > 0:
        M = sum(hops_list) * 1.0 / len(hops_list)
        D = sum((xi - M) ** 2 for xi in hops_list) * 1.0 / len(hops_list)
        # NOTE(review): divides by len-1; would raise ZeroDivisionError for a
        # single sample, but the branch is unreachable (hops_list stays empty).
        print(M, '+/-', (D / (len(hops_list) - 1)) ** 0.5)
    print('Average time to solve: %.2f sec' % ((time.time() - starttime) / N_tests))
In the __name__ == "__main__" block, the number of available CPU cores is determined using multiprocessing.cpu_count(), and N_tests is set to this number. This means that the script will create a separate process for each CPU core available for parallel processing. If you're going to use multiprocessing, try not to print anything or do any I/O until the very end because the disk access bottleneck will slow the whole loop down (even on SSD - it can never possibly be as fast as a CPU's clock speed). Actually you should even import tqdm and create a progress bar for that, and then customize the progress bar to show you the key being worked on, how many have already been done, etc. Much better than printing since there are not synchronized with a mutex (by default). You are right. Using a progress bar is a much better approach than printing progress statements, especially when dealing with multiprocessing. The tqdm library is an excellent choice for displaying progress bars in Python. New script: import time import random import gmpy2 from functools import lru_cache import multiprocessing from tqdm import tqdm
# secp256k1 field prime, group order, and generator coordinates.
modulo = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
Gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8


class Point:
    # Mutable affine curve point.
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y


PG = Point(Gx, Gy)
Z = Point(0, 0)  # zero-point, infinite in real x,y-plane


def mul2(P, p=modulo):
    """Double point P (affine tangent rule)."""
    c = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    R = Point()
    R.x = (c * c - 2 * P.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


def add(P, Q, p=modulo):
    """Add distinct points P and Q (affine chord rule)."""
    dx = Q.x - P.x
    dy = Q.y - P.y
    c = dy * gmpy2.invert(dx, p) % p
    R = Point()
    R.x = (c * c - P.x - Q.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


@lru_cache(maxsize=None)
def X2Y(X, p=modulo):
    """Lift x to one square root of x^3 + 7 mod p (requires p ≡ 3 mod 4)."""
    if p % 4 != 3:
        print('prime must be 3 modulo 4')
        return 0
    X = (X ** 3 + 7) % p
    pw = (p + 1) // 4
    Y = 1
    tmp = X
    # Square-and-multiply: Y = X**pw mod p.
    for w in range(256):
        if (pw >> w) & 1 == 1:
            Y = (Y * tmp) % p
        tmp = pow(tmp, 2, p)
    return Y


def compute_P_table():
    # P[k] = 2^k * G, the hop-size points.
    P = [PG]
    for k in range(255):
        P.append(mul2(P[k]))
    return P


P = compute_P_table()

print('P-table prepared')


def comparator(A, Ak, B, Bk):
    """Return True (and log the recovered key) when tame/wild trap lists collide."""
    result = set(A).intersection(set(B))
    if result:
        sol_kt = A.index(next(iter(result)))
        sol_kw = B.index(next(iter(result)))
        d = Ak[sol_kt] - Bk[sol_kw]
        print('SOLVED:', d)
        with open("results.txt", 'a') as file:
            file.write(('%d' % (Ak[sol_kt] - Bk[sol_kw])) + "\n")
            file.write("---------------\n")
        return True
    else:
        return False


def check(P, Pindex, DP_rarity, file2save, A, Ak, B, Bk):
    # Trap only distinguished points (x divisible by DP_rarity), then compare herds.
    if P.x % DP_rarity == 0:
        A.append(P.x)
        Ak.append(Pindex)
        with open(file2save, 'a') as file:
            file.write(('%064x %d' % (P.x, Pindex)) + "\n")
        return comparator(A, Ak, B, Bk)
    else:
        return False


def mulk(k, P=PG, p=modulo):
    # Recursive double-and-add scalar multiplication k*P.
    # NOTE(review): the odd branch lets add() fall back to its default
    # modulus instead of p — harmless while p == modulo.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        return mulk(k // 2, mul2(P, p), p)
    else:
        return add(P, mulk((k - 1) // 2, mul2(P, p), p))


def search(process_id, Nt, Nw, problem, kangoo_power, starttime):
    """Worker: hop Nt tame and Nw wild kangaroos until a collision solves the key."""
    DP_rarity = 1 << ((problem - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((problem - 1) // 2) + kangoo_power
    T, t, dt = [], [], []
    W, w, dw = [], [], []
    for k in range(Nt):
        t.append((3 << (problem - 2)) + random.randint(1, (1 << (problem - 1))))
        T.append(mulk(t[k]))
        dt.append(0)
    for k in range(Nw):
        w.append(random.randint(1, (1 << (problem - 1))))
        W.append(add(W0, mulk(w[k])))
        dw.append(0)

    # Move the "tame and wild herds are prepared" line here
    print('tame and wild herds are prepared')

    oldtime = time.time()
    Hops, Hops_old = 0, 0
    t0 = time.time()
    starttime = oldtime
    # NOTE(review): the bar totals (Nt/Nw) are far smaller than the actual
    # hop count, so tqdm overflows its total; this is cosmetic only.
    pbar_t = tqdm(total=Nt, desc=f"Process {process_id}: tame", position=process_id, dynamic_ncols=True, leave=False, ncols=100)
    pbar_w = tqdm(total=Nw, desc=f"Process {process_id}: wild", position=process_id, dynamic_ncols=True, leave=False, ncols=100)
    while True:
        for k in range(Nt):
            Hops += 1
            pw = T[k].x % hop_modulo  # exponent of this hop's size
            dt[k] = 1 << pw
            solved = check(T[k], t[k], DP_rarity, "tame.txt", T, t, W, w)
            if solved:
                pbar_t.close()
                pbar_w.close()
                elapsed_time_t = time.time() - starttime
                print(f'Process {process_id}: tame sol. time: %.2f sec' % elapsed_time_t)
                return f'Process {process_id}: tame sol. time: %.2f sec' % elapsed_time_t
            t[k] += dt[k]
            T[k] = add(P[pw], T[k])
            pbar_t.update(1)
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            dw[k] = 1 << pw
            solved = check(W[k], w[k], DP_rarity, "wild.txt", W, w, T, t)
            if solved:
                pbar_t.close()
                pbar_w.close()
                elapsed_time_w = time.time() - starttime
                print(f'Process {process_id}: wild sol. time: %.2f sec' % elapsed_time_w)
                return f'Process {process_id}: wild sol. time: %.2f sec' % elapsed_time_w
            w[k] += dw[k]
            W[k] = add(P[pw], W[k])
            pbar_w.update(1)


def search_wrapper(args):
    # Pool.imap delivers a single object per task; unpack the argument tuple.
    return search(*args)


if __name__ == "__main__":
    # Puzzle #32: key lies in [2^31, 2^32 - 1].
    start = 2147483647
    end = 4294967295
    search_range = end - start + 1
    problem = search_range.bit_length()

    compreessed_public_key = "0209c58240e50e3ba3f833c82655e8725c037a2294e14cf5d73a5df8d56159de69"  #Puzzle 32
    kangoo_power = 3
    Nt = Nw = 2 ** kangoo_power

    # Recover the target point W0 from the compressed public key.
    X = int(compreessed_public_key, 16)
    Y = X2Y(X % (2 ** 256))
    if Y % 2 != (X >> 256) % 2:
        Y = modulo - Y
    X = X % (2 ** 256)
    W0 = Point(X, Y)
    starttime = oldtime = time.time()

    num_cpus = multiprocessing.cpu_count()
    N_tests = num_cpus  # Use the number of CPU cores as the number of tests

    args = [(i, Nt, Nw, problem, kangoo_power, starttime) for i in range(N_tests)]

    # NOTE(review): workers read W0 as a global set inside this guard; relies
    # on fork-style process start — confirm on platforms that use spawn.
    with multiprocessing.Pool(processes=N_tests) as pool:
        results = list(tqdm(pool.imap(search_wrapper, args), total=N_tests))

    # Output the average time to solve
    total_time = sum(float(result.split(': ')[-1][:-4]) for result in results if result is not None)
    print('Average time to solve: %.2f sec' % (total_time / N_tests))
It shows that the "P-table prepared" message is printed at the beginning, and then it displays progress bars for both "tame" and "wild" processes. After finishing the wild process, it shows "Process 0: wild" progress and the completion time. P-table prepared 0%| | 0/4 [00:00<?, ?it/s]tame and wild herds are prepared tame and wild herds are prepared tame and wild herds are prepared Process 0: wild: 0%| | 0/8 [00:00<?, ?it/stame and wild herds are prepared Process 0: wild: 28464it [00:01, 13883.45it/s]SOLVED: -3093472814 Process 1: tame: 28652it [00:01, 13869.72it/s]Process 2: wild sol. time: 1.83 sec Process 2: tame: 28550it [00:01, 13659.12it/s]SOLVED: 3093472814 Process 0: tame sol. time: 1.86 sec 25%|███████████████████████████SOLVED: 3093472814 | 1/4 [00:01<00:05, 1.89s/it] Process 1: tame: 47801it [00:02, 36580.65it/s]Process 1: tame sol. time: 2.35 sec 50%|██████████████████████████████████████████SOLVED: 3093472814 | 2/4 [00:02<00:02, 1.08s/it] Process 3: wild: 68509it [00:02, 29066.35it/s] Process 3: tame sol. time: 3.26 sec 100%|██████████████████████████████████████████████████████████████████████| 4/4 [00:03<00:00, 1.21it/s] Average time to solve: 2.33 sec3, 51300.68it/s]
|
|
|
Is there a multicore version available, or can we edit this one to do that? Thanks, mate, for sharing.
Sure, here's the full script with multiprocessing added: import time import random import gmpy2 from functools import lru_cache import multiprocessing
# secp256k1 field prime, group order, and generator coordinates.
modulo = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
Gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8


class Point:
    # Mutable affine curve point.
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y


PG = Point(Gx, Gy)
Z = Point(0, 0)  # zero-point, infinite in real x,y-plane


def mul2(P, p=modulo):
    """Double point P (affine tangent rule)."""
    c = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    R = Point()
    R.x = (c * c - 2 * P.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


def add(P, Q, p=modulo):
    """Add distinct points P and Q (affine chord rule)."""
    dx = Q.x - P.x
    dy = Q.y - P.y
    c = dy * gmpy2.invert(dx, p) % p
    R = Point()
    R.x = (c * c - P.x - Q.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


@lru_cache(maxsize=None)
def X2Y(X, p=modulo):
    """Lift x to one square root of x^3 + 7 mod p (requires p ≡ 3 mod 4)."""
    if p % 4 != 3:
        print('prime must be 3 modulo 4')
        return 0
    X = (X ** 3 + 7) % p
    pw = (p + 1) // 4
    Y = 1
    tmp = X
    # Square-and-multiply: Y = X**pw mod p.
    for w in range(256):
        if (pw >> w) & 1 == 1:
            Y = (Y * tmp) % p
        tmp = pow(tmp, 2, p)
    return Y


def compute_P_table():
    # P[k] = 2^k * G, the hop-size points.
    P = [PG]
    for k in range(255):
        P.append(mul2(P[k]))
    return P


P = compute_P_table()

print('P-table prepared')


def comparator(A, Ak, B, Bk):
    """Return True (and log the recovered key) when tame/wild trap lists collide."""
    result = set(A).intersection(set(B))
    if result:
        sol_kt = A.index(next(iter(result)))
        sol_kw = B.index(next(iter(result)))
        # 'starttime' is a module-level global set before the pool starts.
        print('total time: %.2f sec' % (time.time() - starttime))
        d = Ak[sol_kt] - Bk[sol_kw]
        print('SOLVED:', d)
        with open("results.txt", 'a') as file:
            file.write(('%d' % (Ak[sol_kt] - Bk[sol_kw])) + "\n")
            file.write("---------------\n")
        return True
    else:
        return False


def check(P, Pindex, DP_rarity, file2save, A, Ak, B, Bk):
    # Trap only distinguished points (x divisible by DP_rarity), then compare herds.
    if P.x % DP_rarity == 0:
        A.append(P.x)
        Ak.append(Pindex)
        with open(file2save, 'a') as file:
            file.write(('%064x %d' % (P.x, Pindex)) + "\n")
        return comparator(A, Ak, B, Bk)
    else:
        return False


def mulk(k, P=PG, p=modulo):
    # Recursive double-and-add scalar multiplication k*P.
    # NOTE(review): the odd branch lets add() fall back to its default
    # modulus instead of p — harmless while p == modulo.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        return mulk(k // 2, mul2(P, p), p)
    else:
        return add(P, mulk((k - 1) // 2, mul2(P, p), p))


def search(process_id, Nt, Nw, problem, kangoo_power, starttime):
    """One worker: run Nt tame and Nw wild kangaroos until a collision solves the key."""
    DP_rarity = 1 << ((problem - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((problem - 1) // 2) + kangoo_power
    T, t, dt = [], [], []
    W, w, dw = [], [], []
    # Tame herd starts at known scalars near the top of the range.
    for k in range(Nt):
        t.append((3 << (problem - 2)) + random.randint(1, (1 << (problem - 1))))
        T.append(mulk(t[k]))
        dt.append(0)
    # Wild herd starts at the target point W0 plus known random offsets.
    for k in range(Nw):
        w.append(random.randint(1, (1 << (problem - 1))))
        W.append(add(W0, mulk(w[k])))
        dw.append(0)
    print('tame and wild herds are prepared')
    oldtime = time.time()
    Hops, Hops_old = 0, 0
    t0 = time.time()
    oldtime = time.time()
    starttime = oldtime
    while True:
        for k in range(Nt):
            Hops += 1
            pw = T[k].x % hop_modulo  # exponent of this hop's size
            dt[k] = 1 << pw
            solved = check(T[k], t[k], DP_rarity, "tame.txt", T, t, W, w)
            if solved:
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            t[k] += dt[k]
            T[k] = add(P[pw], T[k])
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            dw[k] = 1 << pw
            solved = check(W[k], w[k], DP_rarity, "wild.txt", W, w, T, t)
            if solved:
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            w[k] += dw[k]
            W[k] = add(P[pw], W[k])
        t1 = time.time()
        # Hop-rate report roughly every 5 seconds.
        if (t1 - t0) > 5:
            print('%.3f h/s' % ((Hops - Hops_old) / (t1 - t0)))
            t0 = t1
            Hops_old = Hops


# Puzzle #32: key lies in [2^31, 2^32 - 1].
start = 2147483647
end = 4294967295
search_range = end - start + 1
problem = search_range.bit_length()

compreessed_public_key = "0209c58240e50e3ba3f833c82655e8725c037a2294e14cf5d73a5df8d56159de69"  #Puzzle 32
kangoo_power = 3
Nt = Nw = 2 ** kangoo_power

# Recover the target point W0 from the compressed public key.
X = int(compreessed_public_key, 16)
Y = X2Y(X % (2 ** 256))
if Y % 2 != (X >> 256) % 2:
    Y = modulo - Y
X = X % (2 ** 256)
W0 = Point(X, Y)
starttime = oldtime = time.time()

Hops = 0
random.seed()

# NOTE(review): hops_list is never appended to, so the statistics block
# below never executes.
hops_list = []
N_tests = 3


def search_wrapper(process_id):
    # Adapter so Pool.map can pass a single process id.
    return search(process_id, Nt, Nw, problem, kangoo_power, starttime)


if __name__ == "__main__":
    num_cpus = multiprocessing.cpu_count()
    N_tests = num_cpus  # Use the number of CPU cores as the number of tests
    with multiprocessing.Pool(processes=N_tests) as pool:
        results = pool.map(search_wrapper, range(N_tests))
    for result in results:
        print(result)
    M, D = 0, 0
    if len(hops_list) > 0:
        M = sum(hops_list) * 1.0 / len(hops_list)
        D = sum((xi - M) ** 2 for xi in hops_list) * 1.0 / len(hops_list)
        # NOTE(review): divides by len-1; would raise ZeroDivisionError for a
        # single sample, but the branch is unreachable (hops_list stays empty).
        print(M, '+/-', (D / (len(hops_list) - 1)) ** 0.5)
    print('Average time to solve: %.2f sec' % ((time.time() - starttime) / N_tests))
In the __name__ == "__main__" block, the number of available CPU cores is determined using multiprocessing.cpu_count(), and N_tests is set to this number. This means that the script will create a separate process for each CPU core available for parallel processing.
|
|
|
That link no longer exists. I barely managed to find the original pollard_kangaroo.txt on some Russian site. But it is deprecated for Python 2.x. I updated to 3.x and it's still slow, but if you want to play, here you go. import time import random import gmpy2 from functools import lru_cache
# secp256k1 field prime, group order, and generator coordinates.
modulo = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
Gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8


class Point:
    # Mutable affine curve point.
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y


PG = Point(Gx, Gy)
Z = Point(0, 0)  # zero-point, infinite in real x,y-plane


def mul2(P, p=modulo):
    """Double point P (affine tangent rule)."""
    c = (3 * P.x * P.x * pow(2 * P.y, -1, p)) % p
    R = Point()
    R.x = (c * c - 2 * P.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


def add(P, Q, p=modulo):
    """Add distinct points P and Q (affine chord rule); gmpy2 inversion for speed."""
    dx = Q.x - P.x
    dy = Q.y - P.y
    c = dy * gmpy2.invert(dx, p) % p
    R = Point()
    R.x = (c * c - P.x - Q.x) % p
    R.y = (c * (P.x - R.x) - P.y) % p
    return R


@lru_cache(maxsize=None)
def X2Y(X, p=modulo):
    """Lift x to one square root of x^3 + 7 mod p (requires p ≡ 3 mod 4)."""
    if p % 4 != 3:
        print('prime must be 3 modulo 4')
        return 0
    X = (X ** 3 + 7) % p
    pw = (p + 1) // 4
    Y = 1
    tmp = X
    # Square-and-multiply: Y = X**pw mod p.
    for w in range(256):
        if (pw >> w) & 1 == 1:
            Y = (Y * tmp) % p
        tmp = pow(tmp, 2, p)
    return Y


def compute_P_table():
    # P[k] = 2^k * G, the hop-size points.
    P = [PG]
    for k in range(255):
        P.append(mul2(P[k]))
    return P


P = compute_P_table()

print('P-table prepared')


def comparator(A, Ak, B, Bk):
    """Return True (and log the recovered key) when tame/wild trap lists collide."""
    result = set(A).intersection(set(B))
    if result:
        sol_kt = A.index(next(iter(result)))
        sol_kw = B.index(next(iter(result)))
        # 'starttime' is a module-level global set below.
        print('total time: %.2f sec' % (time.time() - starttime))
        d = Ak[sol_kt] - Bk[sol_kw]
        print('SOLVED:', d)
        with open("results.txt", 'a') as file:
            file.write(('%d' % (Ak[sol_kt] - Bk[sol_kw])) + "\n")
            file.write("---------------\n")
        return True
    else:
        return False


def check(P, Pindex, DP_rarity, file2save, A, Ak, B, Bk):
    # Trap only distinguished points (x divisible by DP_rarity), then compare herds.
    if P.x % DP_rarity == 0:
        A.append(P.x)
        Ak.append(Pindex)
        with open(file2save, 'a') as file:
            file.write(('%064x %d' % (P.x, Pindex)) + "\n")
        return comparator(A, Ak, B, Bk)
    else:
        return False


def mulk(k, P=PG, p=modulo):
    # Recursive double-and-add scalar multiplication k*P.
    # This variant correctly forwards p to add() in the odd branch.
    if k == 0:
        return Z
    elif k == 1:
        return P
    elif k % 2 == 0:
        return mulk(k // 2, mul2(P, p), p)
    else:
        return add(P, mulk((k - 1) // 2, mul2(P, p), p), p)


def search(Nt, Nw, problem, kangoo_power, starttime):
    """Run Nt tame and Nw wild kangaroos until a collision solves the key."""
    DP_rarity = 1 << ((problem - 2 * kangoo_power) // 2 - 2)
    hop_modulo = ((problem - 1) // 2) + kangoo_power
    T, t, dt = [], [], []
    W, w, dw = [], [], []
    # Tame herd starts at known scalars near the top of the range.
    for k in range(Nt):
        t.append((3 << (problem - 2)) + random.randint(1, (1 << (problem - 1))))
        T.append(mulk(t[k]))
        dt.append(0)
    # Wild herd starts at the target point W0 plus known random offsets.
    for k in range(Nw):
        w.append(random.randint(1, (1 << (problem - 1))))
        W.append(add(W0, mulk(w[k])))
        dw.append(0)
    print('tame and wild herds are prepared')
    oldtime = time.time()
    Hops, Hops_old = 0, 0
    t0 = time.time()
    oldtime = time.time()
    starttime = oldtime
    while True:
        for k in range(Nt):
            Hops += 1
            pw = T[k].x % hop_modulo  # exponent of this hop's size
            dt[k] = 1 << pw
            solved = check(T[k], t[k], DP_rarity, "tame.txt", T, t, W, w)
            if solved:
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            t[k] += dt[k]
            T[k] = add(P[pw], T[k])
        for k in range(Nw):
            Hops += 1
            pw = W[k].x % hop_modulo
            dw[k] = 1 << pw
            solved = check(W[k], w[k], DP_rarity, "wild.txt", W, w, T, t)
            if solved:
                return 'sol. time: %.2f sec' % (time.time() - starttime)
            w[k] += dw[k]
            W[k] = add(P[pw], W[k])
        t1 = time.time()
        # Hop-rate report roughly every 5 seconds.
        if (t1 - t0) > 5:
            print('%.3f h/s' % ((Hops - Hops_old) / (t1 - t0)))
            t0 = t1
            Hops_old = Hops


# Puzzle #32: key lies in [2^31, 2^32 - 1].
start = 2147483647
end = 4294967295
search_range = end - start + 1
problem = search_range.bit_length()

compreessed_public_key = "0209c58240e50e3ba3f833c82655e8725c037a2294e14cf5d73a5df8d56159de69"  #Puzzle 32
kangoo_power = 3
Nt = Nw = 2 ** kangoo_power
# Recover the target point W0 from the compressed public key.
X = int(compreessed_public_key, 16)
Y = X2Y(X % (2 ** 256))
if Y % 2 != (X >> 256) % 2:
    Y = modulo - Y
X = X % (2 ** 256)
W0 = Point(X, Y)
starttime = oldtime = time.time()

Hops = 0
random.seed()

# NOTE(review): hops_list is never filled, so the statistics block is inert.
hops_list = []
N_tests = 3

for k in range(N_tests):
    # Truncate the trap files before each run.
    with open("tame.txt", 'w') as tame_file, open("wild.txt", 'w') as wild_file:
        tame_file.write('')
        wild_file.write('')
    search_result = search(Nt, Nw, problem, kangoo_power, starttime)
    print(search_result)
# NOTE(review): block placement below reconstructed from a collapsed paste;
# with hops_list empty the guarded branch never runs either way.
M, D = 0, 0
if len(hops_list) > 0:
    M = sum(hops_list) * 1.0 / len(hops_list)
    D = sum((xi - M) ** 2 for xi in hops_list) * 1.0 / len(hops_list)
    print(M, '+/-', (D / (len(hops_list) - 1)) ** 0.5)
print('Average time to solve: %.2f sec' % ((time.time() - starttime) / N_tests))
It is about 142827.704 h/s here....
|
|
|
I don't know what I haven't tried because there are so many attempts. I don't remember everything. Years have passed in this. I started dreaming at night about WIFs ending so.... This script calculates the common prefixes of the first 42 characters among Bitcoin private keys in a specified range. It then lists the private keys and prints the top 10 most similar common prefixes in reverse order (longest to shortest). start = 67079069358943824031 end = 69594534459904217431 Start and end sets the range of private keys (start and end values) and the number of parts to divide the range into (num_parts = 9). You can adjust these values as you see fit. Common Prefix: KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5 Part 1 67079069358943824031 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5Dno9kZYi4bZLVzbZF Part 2 67358565481272756631 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5YXcS8wxDr233cNfFe Part 3 67638061603601689231 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5sGRiXLMjdSWjJrHgT Part 4 67917557725930621831 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6C1EzuimFQrzXCYipU Part 5 68197053848259554431 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6Wk4HJ7AmCHUHd6pi7 Part 6 68476549970588487031 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6qUsZgVaGyhx259cDB Part 7 68756046092917419631 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7ADgr4synm8RiEbUHW Part 8 69035542215246352231 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7UxW8TGPJYYuQ9jo4j Part 9 69315038337575284831 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7ohKQqenpKyP9CM47x Top 10 Most Similar Prefixes (in reverse order): KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7ohKQqen import secp256k1 as ice
def find_common_prefix(start, end, num_parts): part_size = (end - start) // num_parts common_prefixes = []
for i in range(num_parts): start_dec = start + i * part_size end_dec = start + (i + 1) * part_size - 1
start_hex = "%064x" % start_dec start_wif = ice.btc_pvk_to_wif(start_hex) # Compressed WIF
if not common_prefixes: common_prefixes.append(start_wif[:42]) elif start_wif.startswith(common_prefixes[-1]): continue else: # Adjust common_prefixes to match the longest common prefix for j in range(42): if start_wif[j] != common_prefixes[-1][j]: common_prefixes[-1] = common_prefixes[-1][:j] break common_prefixes.append(start_wif[:42])
return common_prefixes
def calculate_puzzle_parts():
    """Split the puzzle key range into 9 parts and report WIF prefixes.

    Prints the overall common prefix, each part's starting key (decimal)
    with its compressed WIF, and finally the collected prefixes from
    shortest to longest.

    Bug fixed: the original printing loop used
    range(len-1, len-num_parts-1, -1); when fewer than num_parts
    prefixes were collected the indices went negative and wrapped
    around, printing the same prefixes twice (duplicates are visible in
    the sample output).  Iterating the reversed sorted list prints each
    prefix exactly once.
    """
    start = 67079069358943824031
    end = 69594534459904217431
    num_parts = 9

    common_prefixes = find_common_prefix(start, end, num_parts)
    print("Common Prefix:", common_prefixes[0])

    part_size = (end - start) // num_parts
    for i in range(num_parts):
        start_dec = start + i * part_size
        start_wif = ice.btc_pvk_to_wif("%064x" % start_dec)  # Compressed WIF
        print(f"Part {i + 1}", start_dec, start_wif)

    print("\nTop 10 Most Similar Prefixes (in reverse order):")
    # Longest-first sort, then emit in reverse => shortest-to-longest.
    sorted_prefixes = sorted(common_prefixes, key=len, reverse=True)
    for prefix in reversed(sorted_prefixes):
        print(prefix)


calculate_puzzle_parts()
|
|
|
They are all in range: 67079069358943824031 to 69594534459904217431 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3q + a5, a6, a7 and a8. Part 1 67079069358943824031 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5Dno9kZYi4bZLVzbZF First two characters: a5 Part 1 67358565481272756630 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5YXcS8wxDr1Y9XcQnE Part 2 67358565481272756631 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5YXcS8wxDr233cNfFe First two characters: a5 Part 2 67638061603601689230 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5sGRiXLMjdS1r2AWB6 Part 3 67638061603601689231 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa5sGRiXLMjdSWjJrHgT First two characters: a5 Part 3 67917557725930621830 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6C1EzuimFQrVdKiyeV Part 4 67917557725930621831 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6C1EzuimFQrzXCYipU First two characters: a6 Part 4 68197053848259554430 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6Wk4HJ7AmCGyQd62DN Part 5 68197053848259554431 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6Wk4HJ7AmCHUHd6pi7 First two characters: a6 Part 5 68476549970588487030 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6qUsZgVaGyhT8CEc6k Part 6 68476549970588487031 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa6qUsZgVaGyhx259cDB First two characters: a6 Part 6 68756046092917419630 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7ADgr4synm7vmiRzF3 Part 7 68756046092917419631 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7ADgr4synm8RiEbUHW First two characters: a7 Part 7 69035542215246352230 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7UxW8TGPJYYQYcKdjj Part 8 69035542215246352231 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7UxW8TGPJYYuQ9jo4j First two characters: a7 Part 8 69315038337575284830 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7ohKQqenpKxtGP3TDy Part 9 69315038337575284831 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa7ohKQqenpKyP9CM47x First two characters: a7 Part 9 69594534459904217430 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa88S8hE3CL7PMzjRr9u Part 10 69594534459904217431 KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qa88S8hE3CL7PrqXtxnM First two characters: a8 It only takes about 6143,074 years to solve this range. 
![Grin](https://bitcointalk.org/Smileys/default/grin.gif)
|
|
|
As the list unfolds, it weaves a tapestry of interwoven imagination, leaving the observer intrigued by the enigmatic beauty that emerges from the simple elegance of logarithmic transformations. Like the cryptic genius behind an encrypted message, the origins and intentions of this sequence remain shrouded in mystery, inviting wonder and sparking curiosity within those who seek to comprehend its enigmatic essence. ![Grin](https://bitcointalk.org/Smileys/default/grin.gif)
|
|
|
May I ask why you guys are doing this? What do you think is the advantage?
Limited resources...... logarithmic difference (base 10) between WIFs 1-65: log(3) - log(1) ≈ 0.47712125471966244 log(7) - log(3) ≈ 0.36797678529459443 log(8 ) - log(7) ≈ 0.05799194697768673 log(21) - log(8 ) ≈ 0.41912930774197565 log(49) - log(21) ≈ 0.36797678529459443 log(76) - log(49) ≈ 0.19061751225227766 log(224) - log(76) ≈ 0.46943442605337143 log(467) - log(224) ≈ 0.3190688622319493 log(514) - log(467) ≈ 0.04164623842916361 log(1155) - log(514) ≈ 0.3516188652328874 log(2683) - log(1155) ≈ 0.3660386884437759 log(5216) - log(2683) ≈ 0.2887169100519248 log(10544) - log(5216) ≈ 0.3056678145260709 log(26867) - log(10544) ≈ 0.40621377807107406 log(51510) - log(26867) ≈ 0.28267237455957017 log(95823) - log(51510) ≈ 0.26957821362600115 log(198669) - log(95823) ≈ 0.31666034225815626 log(357535) - log(198669) ≈ 0.25518845663206713 log(863317) - log(357535) ≈ 0.3828517305049723 log(1811764) - log(863317) ≈ 0.3219313330147192 log(3007503) - log(1811764) ≈ 0.22010444330662446 log(5598802) - log(3007503) ≈ 0.26988903984573276 log(14428676) - log(5598802) ≈ 0.41113137224813495 log(33185509) - log(14428676) ≈ 0.3617220019295993 log(54538862) - log(33185509) ≈ 0.21575758852777027 log(111949941) - log(54538862) ≈ 0.3123177972583235 log(227634408) - log(111949941) ≈ 0.3082140392839779 log(400708894) - log(227634408) ≈ 0.24559107367744676 log(1033162084) - log(400708894) ≈ 0.4113394776328735 log(2102388551) - log(1033162084) ≈ 0.30854452321700526 log(3093472814) - log(2102388551) ≈ 0.167733320870153 log(7137437912) - log(3093472814) ≈ 0.36309603966333015 log(14133072157) - log(7137437912) ≈ 0.2966942328950074 log(20112871792) - log(14133072157) ≈ 0.15323750896247362 log(42387769980) - log(20112871792) ≈ 0.3237664837075102 log(100251560595) - log(42387769980) ≈ 0.3738505729734196 log(146971536592) - log(100251560595) ≈ 0.16614209284620785 log(323724968937) - log(146971536592) ≈ 0.3429429631062055 log(1003651412950) - log(323724968937) ≈ 0.4914067024711515 log(1458252205147) - 
log(1003651412950) ≈ 0.16224974149667518 log(2895374552463) - log(1458252205147) ≈ 0.29787211121731233 log(7409811047825) - log(2895374552463) ≈ 0.40810238044098246 log(15404761757071) - log(7409811047825) ≈ 0.3178478526126524 log(19996463086597) - log(15404761757071) ≈ 0.11329819966627307 log(51408670348612) - log(19996463086597) ≈ 0.41008318549831724 log(119666659114170) - log(51408670348612) ≈ 0.366936795177466 log(191206974700443) - log(119666659114170) ≈ 0.20353056363880792 log(409118905032525) - log(191206974700443) ≈ 0.33034581824839665 log(611140496167764) - log(409118905032525) ≈ 0.17429151410955282 log(2058769515153876) - log(611140496167764) ≈ 0.5274666664452157 log(4216495639600700) - log(2058769515153876) ≈ 0.3113439266558567 log(6763683971478124) - log(4216495639600700) ≈ 0.20523165173925442 log(9974455244496707) - log(6763683971478124) ≈ 0.16870587869969977 log(30045390491869460) - log(9974455244496707) ≈ 0.47888866680875336 log(44218742292676575) - log(30045390491869460) ≈ 0.16782853304825565 log(138245758910846492) - log(44218742292676575) ≈ 0.49504543109679533 log(199976667976342049) - log(138245758910846492) ≈ 0.16032751092312678 log(525070384258266191) - log(199976667976342049) ≈ 0.41923819544053276 log(1135041350219496382) - log(525070384258266191) ≈ 0.3347941601156713 log(1425787542618654982) - log(1135041350219496382) ≈ 0.09904313245967539 log(3908372542507822062) - log(1425787542618654982) ≈ 0.4379411376951916 log(8993229949524469768) - log(3908372542507822062) ≈ 0.36191974453574244 log(17799667357578236628) - log(8993229949524469768) ≈ 0.2964961881251935 log(30568377312064202855) - log(17799667357578236628) ≈ 0.23486049906891004 import math
# Known puzzle-transaction private keys (decimal), puzzles #1 through #65.
numbers = [
    1, 3, 7, 8, 21, 49, 76, 224, 467, 514,
    1155, 2683, 5216, 10544, 26867, 51510, 95823, 198669, 357535, 863317,
    1811764, 3007503, 5598802, 14428676, 33185509,
    54538862, 111949941, 227634408, 400708894, 1033162084,
    2102388551, 3093472814, 7137437912, 14133072157, 20112871792,
    42387769980, 100251560595, 146971536592, 323724968937, 1003651412950,
    1458252205147, 2895374552463, 7409811047825, 15404761757071,
    19996463086597, 51408670348612, 119666659114170, 191206974700443,
    409118905032525, 611140496167764, 2058769515153876, 4216495639600700,
    6763683971478124, 9974455244496707, 30045390491869460,
    44218742292676575, 138245758910846492, 199976667976342049,
    525070384258266191, 1135041350219496382, 1425787542618654982,
    3908372542507822062, 8993229949524469768, 17799667357578236628,
    30568377312064202855,
]
def calculate_log_difference(lst):
    """Return the base-10 log gap between each consecutive pair in lst.

    Each entry is log10(curr / prev), i.e. how many decimal orders of
    magnitude separate neighbouring values.  An input with fewer than
    two elements yields an empty list.
    """
    return [math.log10(curr / prev) for prev, curr in zip(lst, lst[1:])]
# Compute the base-10 log gap between each consecutive pair of keys.
logarithmic_difference = calculate_log_difference(numbers)

# Report every consecutive pair alongside its gap.
for prev, curr, gap in zip(numbers, numbers[1:], logarithmic_difference):
    print(f"log({curr}) - log({prev}) ≈ {gap}")
When we look at the differences, most of them fall roughly between 0.1 and 0.5 decimal orders of magnitude, with no obvious trend. ![Grin](https://bitcointalk.org/Smileys/default/grin.gif) The differences appear to fluctuate without any apparent pattern.
|
|
|
|