https://github.com/ethereum/wiki/wiki/Ethash

I have compiled the above code, but I am not able to write the DAG to a file.

Could anyone please help me with this?

I have combined all of the code mentioned in the link above, but nothing is printed.

I also executed test*.py (provided in the test directory), but no luck.

Again, I tried the code below, but it throws an AttributeError:

with open("cache","rb") as fcache: 
    cache = fcache.read()
# Using same block 0 from benchmark code 
data_size = pyethash.get_full_size(0)
print("Generating dataset (DAG) of size %d." % data_size) 
dataset = pyethash.calc_dataset_bytes(data_size, cache) 
with open("dataset","w+b") as fcache: 
   fcache.write(dataset)

===========================================

I have successfully compiled ethash:

[root@localhost build]# ./src/benchmark/Benchmark_FULL
ethash_mkcache: 580ms, sha3: 6a286c5fc0f36814732c86c3e71c036dd96d58def86b9244bb1480571e67d2a8
ethash_light test: 10ms, a7ea1de3a8007134900cd2c86f7e55af68a1d3e4537438a0a966b6cbafa23c90
ethash_compute_full_data: 178740ms
ethash_full test: 0ns, a7ea1de3a8007134900cd2c86f7e55af68a1d3e4537438a0a966b6cbafa23c90
hashrate:   156038, bw:   1219 MB/s
[root@localhost build]# 


[root@localhost build]# cat dag_generation.py
import pyethash

with open("cache","rb") as fcache:
    cache = fcache.read()

# Using same block 0 from benchmark code
data_size = pyethash.get_full_size(0)

print("Generating dataset (DAG) of size %d." % data_size)
dataset = pyethash.calc_dataset_bytes(data_size, cache)

with open("dataset","w+b") as fcache:
    fcache.write(dataset)
[root@localhost build]# 
Error:

[root@localhost build]# 

[root@localhost build]# python dag_generation.py
Traceback (most recent call last):
  File "dag_generation.py", line 7, in <module>
    data_size = pyethash.get_full_size(0)
AttributeError: 'module' object has no attribute 'get_full_size'

[root@localhost build]# 
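Listing what the installed extension actually exports confirms the missing attribute (a quick check with standard Python introspection, run against the same build):

import pyethash
# Print every name the compiled pyethash module exposes; on this build,
# get_full_size does not appear, which matches the AttributeError above.
print(dir(pyethash))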


1 Answer


Please find the Python code below (it targets Python 2) to generate the cache and calculate the dataset (DAG).

import pyethash, sha3, copy

WORD_BYTES = 4                    # bytes in word
DATASET_BYTES_INIT = 2**30        # bytes in dataset at genesis
DATASET_BYTES_GROWTH = 2**23      # dataset growth per epoch
CACHE_BYTES_INIT = 2**24          # bytes in cache at genesis
CACHE_BYTES_GROWTH = 2**17        # cache growth per epoch
CACHE_MULTIPLIER=1024             # Size of the DAG relative to the cache
EPOCH_LENGTH = 30000              # blocks per epoch
MIX_BYTES = 128                   # width of mix
HASH_BYTES = 64                   # hash length in bytes
DATASET_PARENTS = 256             # number of parents of each dataset element
CACHE_ROUNDS = 3                  # number of rounds in cache production
ACCESSES = 64                     # number of accesses in hashimoto loop


def decode_int(s):
    return int(s[::-1].encode('hex'), 16) if s else 0

def encode_int(s):
    a = "%x" % s
    return '' if s == 0 else ('0' * (len(a) % 2) + a).decode('hex')[::-1]

def zpad(s, length):
    return s + '\x00' * max(0, length - len(s))

def serialize_hash(h):
    return ''.join([zpad(encode_int(x), 4) for x in h])

def deserialize_hash(h):
    return [decode_int(h[i:i+WORD_BYTES]) for i in range(0, len(h), WORD_BYTES)]

def hash_words(h, sz, x):
    if isinstance(x, list):
        x = serialize_hash(x)
    y = h(x)
    return deserialize_hash(y)

def serialize_cache(ds):
    return ''.join([serialize_hash(h) for h in ds])

serialize_dataset = serialize_cache

# sha3 hash function, outputs 64 bytes
def sha3_512(x):
    return hash_words(lambda v: sha3.sha3_512(v).digest(), 64, x)

def sha3_256(x):
    return hash_words(lambda v: sha3.sha3_256(v).digest(), 32, x)

def xor(a, b):
    return a ^ b

def isprime(x):
    for i in range(2, int(x**0.5)):
         if x % i == 0:
             return False
    return True



def get_full_size(block_number):
    sz = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number // EPOCH_LENGTH)
    sz -= MIX_BYTES
    while not isprime(sz / MIX_BYTES):
        sz -= 2 * MIX_BYTES
    return sz

def get_cache_size(block_number):
    sz = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number // EPOCH_LENGTH)
    sz -= HASH_BYTES
    while not isprime(sz / HASH_BYTES):
        sz -= 2 * HASH_BYTES
    return sz

def get_seedhash(block):
    s = '\x00' * 32
    for i in range(block // EPOCH_LENGTH):
        s = serialize_hash(sha3_256(s))
    return s


def mkcache(cache_size, seed):
    n = cache_size // HASH_BYTES
    # Sequentially produce the initial dataset
    o = [sha3_512(seed)]
    for i in range(1, n):
        o.append(sha3_512(o[-1]))
    # Use a low-round version of randmemohash
    for _ in range(CACHE_ROUNDS):
        for i in range(n):
            v = o[i][0] % n
            o[i] = sha3_512(map(xor, o[(i-1+n) % n], o[v]))
    return o

FNV_PRIME = 0x01000193

def fnv(v1, v2):
    return ((v1 * FNV_PRIME) ^ v2) % 2**32

def calc_dataset_item(cache, i):
    n = len(cache)
    r = HASH_BYTES // WORD_BYTES
    # initialize the mix
    mix = copy.copy(cache[i % n])
    mix[0] ^= i
    mix = sha3_512(mix)
    # fnv it with a lot of random cache nodes based on i
    for j in range(DATASET_PARENTS):
        cache_index = fnv(i ^ j, mix[j % r])
        mix = map(fnv, mix, cache[cache_index % n])
    return sha3_512(mix)

def calc_dataset(full_size, cache):
    return [calc_dataset_item(cache, i) for i in range(full_size // HASH_BYTES)]


def hashimoto(header, nonce, full_size, dataset_lookup):
    n = full_size / HASH_BYTES
    w = MIX_BYTES // WORD_BYTES
    mixhashes = MIX_BYTES / HASH_BYTES
    # combine header+nonce into a 64 byte seed
    s = sha3_512(header + nonce[::-1])
    # start the mix with replicated s
    mix = []
    for _ in range(MIX_BYTES / HASH_BYTES):
        mix.extend(s)
    # mix in random dataset nodes
    for i in range(ACCESSES):
        p = fnv(i ^ s[0], mix[i % w]) % (n // mixhashes) * mixhashes
        newdata = []
        for j in range(MIX_BYTES / HASH_BYTES):
            newdata.extend(dataset_lookup(p + j))
        mix = map(fnv, mix, newdata)
    # compress mix
    cmix = []
    for i in range(0, len(mix), 4):
        cmix.append(fnv(fnv(fnv(mix[i], mix[i+1]), mix[i+2]), mix[i+3]))
    return {
        "mix digest": serialize_hash(cmix),
        "result": serialize_hash(sha3_256(s+cmix))
    }

def hashimoto_light(full_size, cache, header, nonce):
    return hashimoto(header, nonce, full_size, lambda x: calc_dataset_item(cache, x))

def hashimoto_full(full_size, dataset, header, nonce):
    return hashimoto(header, nonce, full_size, lambda x: dataset[x])



def mine(full_size, dataset, header, difficulty):
    # zero-pad target to compare with hash on the same digit when reversed
    target = zpad(encode_int(2**256 // difficulty), 64)[::-1]
    from random import randint
    nonce = randint(0, 2**64)
    while hashimoto_full(full_size, dataset, header, nonce) > target:
        nonce = (nonce + 1) % 2**64
    return nonce



full_size = get_full_size(0)
cache_size = get_cache_size(0) 
#seed = get_seedhash(0)
seed = pyethash.get_seedhash(0)
block_number = 0

#cache = pyethash.mkcache_bytes(0)
cache = mkcache(cache_size, seed)
#with open("cache","rb") as fcache:
#    cache = fcache.read()

#print(mkcache(cache_size, seed))

#print("pyethash seed", seed2)

print(calc_dataset(full_size, cache))
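To actually write the generated DAG to a file (the original goal), the list of hash words returned by calc_dataset has to be serialized back into bytes first. A minimal sketch reusing serialize_dataset from the code above (the output file name is illustrative):

dataset = calc_dataset(full_size, cache)
# Each dataset item is a list of 32-bit words; serialize_dataset joins
# them back into a single flat byte string suitable for writing to disk.
with open("dataset", "wb") as fdata:
    fdata.write(serialize_dataset(dataset))

Keep in mind that computing a full-size dataset (DATASET_BYTES_INIT = 2**30, roughly 1 GiB at genesis) in pure Python is extremely slow; the compiled pyethash.calc_dataset_bytes call shown in the question is a much faster way to produce a full DAG.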