crypto: speed up alternating canonical and merge-mined blocks

This commit was authored by:
Crypto City, 2020-11-27 03:08:47 +00:00
parent 915753e73d
commit 71d8408168

View File

@ -244,6 +244,7 @@ void rx_slow_hash(const uint64_t mainheight, const uint64_t seedheight, const ch
rx_state *rx_sp;
randomx_cache *cache;
int lock_needed = 0;
int full_lock_needed = 0;
static const char *env = NULL;
static int faster_pow = -1;
@ -282,10 +283,31 @@ void rx_slow_hash(const uint64_t mainheight, const uint64_t seedheight, const ch
toggle ^= (is_alt != 0);
// after playing silly buggers above, just check
if (rx_s[0].rs_cache && !memcmp(seedhash, rx_s[0].rs_hash, HASH_SIZE))
{
toggle = 0;
}
else if (rx_s[1].rs_cache && !memcmp(seedhash, rx_s[1].rs_hash, HASH_SIZE))
{
toggle = 1;
}
else if (!rx_s[0].rs_cache)
{
toggle = 0;
full_lock_needed = 1;
}
else if (!rx_s[1].rs_cache)
{
toggle = 1;
full_lock_needed = 1;
}
rx_sp = &rx_s[toggle];
CTHR_MUTEX_LOCK(rx_sp->rs_mutex);
CTHR_RWLOCK_LOCK(rwlock, is_alt || lock_needed);
CTHR_MUTEX_UNLOCK(rx_mutex);
if (!full_lock_needed)
CTHR_MUTEX_UNLOCK(rx_mutex);
cache = rx_sp->rs_cache;
if (cache == NULL) {
@ -374,6 +396,8 @@ void rx_slow_hash(const uint64_t mainheight, const uint64_t seedheight, const ch
if (is_alt || lock_needed)
CTHR_MUTEX_UNLOCK(rx_sp->rs_mutex);
CTHR_RWLOCK_UNLOCK(rwlock, is_alt || lock_needed);
if (full_lock_needed)
CTHR_MUTEX_UNLOCK(rx_mutex);
}
void rx_slow_hash_allocate_state(void) {