bitset: Actually enable an optimization in bit_set/clear_range

Previously, we were setting bits up to the first byte boundary,
memset()ing up to the last byte boundary, and then ignoring the
memset() and setting/clearing every remaining bit individually,
starting from where the first for-loop left off.

This should be *at least* nine times faster.
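
As a rough illustration of the new fast path, here is a minimal standalone
sketch (not the project's actual header: the bit_set() stand-in, its LSB-first
bit ordering, and the main() harness are assumptions made for this example;
the real header derives the bit position from char_with_bit_set()). The point
of the extra arithmetic after the memset() is to jump 'from' past the bytes
the memset() already filled, so the final loop only touches the last len%8
bits:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for the project's bit_set(): set bit 'idx' of
     * array 'b', LSB-first within each byte. */
    static inline void bit_set(char* b, uint64_t idx) {
    	b[idx / 8] |= (char)(1u << (idx % 8));
    }

    /* Sketch of the new bit_set_range(): head loop up to the first byte
     * boundary, memset() over the whole bytes, then skip past the
     * memset()ed region so the tail loop only handles the final len%8 bits. */
    static inline void bit_set_range(char* b, uint64_t from, uint64_t len) {
    	for (; from % 8 != 0 && len > 0; len--) { bit_set(b, from++); }
    	if (len >= 8) {
    		memset(b + (from / 8), 255, len / 8);
    		from += len;    /* jump one past the last bit in the range     */
    		len = len % 8;  /* only the final len%8 bits still need setting */
    		from -= len;    /* step back to the start of that unaligned tail */
    	}
    	for (; len > 0; len--) { bit_set(b, from++); }
    }

    int main(void) {
    	/* Cross-check against a naive bit-at-a-time loop for a range that is
    	 * unaligned at both ends: bits 3..32 inclusive (from=3, len=30). */
    	char fast[8] = {0}, slow[8] = {0};
    	bit_set_range(fast, 3, 30);
    	for (uint64_t i = 3; i < 3 + 30; i++) { bit_set(slow, i); }
    	assert(memcmp(fast, slow, sizeof fast) == 0);
    	puts("bit_set_range matches the naive loop");
    	return 0;
    }

With from=3 and len=30, for example, the head loop sets bits 3..7, the
memset() covers bits 8..31 in three whole bytes, and the tail loop sets only
bit 32; before this commit, the final loop would have walked all 25 remaining
bits one at a time.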
Author: nick
Date:   2013-07-24 11:19:52 +01:00
Parent: 9a37951aaa
Commit: f8fd4e0437


@@ -34,16 +34,40 @@ static inline void bit_clear(char* b, uint64_t idx) {
 	//__sync_fetch_and_nand(b+(idx/8), char_with_bit_set(idx));
 }
 /** Sets ''len'' bits in array ''b'' starting at offset ''from'' */
-static inline void bit_set_range(char* b, uint64_t from, uint64_t len) {
-	for (; from%8 != 0 && len > 0; len--) { bit_set(b, from++); }
-	if (len >= 8) { memset(b+(from/8), 255, len/8); }
-	for (; len > 0; len--) { bit_set(b, from++); }
+static inline void bit_set_range(char* b, uint64_t from, uint64_t len)
+{
+	for ( ; from%8 != 0 && len > 0 ; len-- ) {
+		bit_set( b, from++ );
+	}
+	if (len >= 8) {
+		memset(b+(from/8), 255, len/8 );
+		from += len;
+		len = (len%8);
+		from -= len;
+	}
+	for ( ; len > 0 ; len-- ) {
+		bit_set( b, from++ );
+	}
 }
 /** Clears ''len'' bits in array ''b'' starting at offset ''from'' */
-static inline void bit_clear_range(char* b, uint64_t from, uint64_t len) {
-	for (; from%8 != 0 && len > 0; len--) { bit_clear(b, from++); }
-	if (len >= 8) { memset(b+(from/8), 0, len/8); }
-	for (; len > 0; len--) { bit_clear(b, from++); }
+static inline void bit_clear_range(char* b, uint64_t from, uint64_t len)
+{
+	for ( ; from%8 != 0 && len > 0 ; len-- ) {
+		bit_clear( b, from++ );
+	}
+	if (len >= 8) {
+		memset(b+(from/8), 0, ( len/8 ) + 1);
+		from += len;
+		len = (len%8);
+		from -= len;
+	}
+	for ( ; len > 0 ; len-- ) {
+		bit_clear( b, from++ );
+	}
 }
 /** Counts the number of contiguous bits in array ''b'', starting at ''from''