I'm tinkering around with AVX-2 instructions and I'm looking for a fast way to count the number of leading zeros in a __m256i
word (which has 256 bits).
So far, I have figured out the following way:
// Computes the number of leading zero bits.
// Here, avx_word is of type __m256i.
if (!_mm256_testz_si256(avx_word, avx_word)) {
  uint64_t word = _mm256_extract_epi64(avx_word, 0);
  if (word > 0)
    return (__builtin_clzll(word));
  word = _mm256_extract_epi64(avx_word, 1);
  if (word > 0)
    return (__builtin_clzll(word) + 64);
  word = _mm256_extract_epi64(avx_word, 2);
  if (word > 0)
    return (__builtin_clzll(word) + 128);
  word = _mm256_extract_epi64(avx_word, 3);
  return (__builtin_clzll(word) + 192);
} else
  return 256; // word is entirely zero
However, I find it rather clumsy to figure out the exact non-zero word within the 256-bit register.
Does anybody know if there is a more elegant (or faster) way to do this?
Just as additional information: I actually want to compute the index of the first set bit for arbitrarily long vectors created by logical ANDs, and I am comparing the performance of standard 64-bit operations with SSE and AVX-2 code. Here is my entire test code:
#include <stdio.h>
#include <stdlib.h>
#include <immintrin.h>
#include <stdint.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <stdalign.h>
#define ALL 0xFFFFFFFF
#define NONE 0x0
#define BV_SHIFTBITS ((size_t) 6)
#define BV_MOD_WORD ((size_t) 63)
#define BV_ONE ((uint64_t) 1)
#define BV_ZERO ((uint64_t) 0)
#define BV_WORDSIZE ((uint64_t) 64)
uint64_t*
Vector_new(
    size_t num_bits) {
  assert((num_bits % 256) == 0);
  size_t num_words = num_bits >> BV_SHIFTBITS;
  size_t mod = num_bits & BV_MOD_WORD;
  if (mod > 0)
    assert(0);
  uint64_t* words;
  posix_memalign((void**) &(words), 32, sizeof(uint64_t) * num_words);
  for (size_t i = 0; i < num_words; ++i)
    words[i] = 0;
  return words;
}
void
Vector_set(
    uint64_t* vector,
    size_t pos) {
  const size_t word_index = pos >> BV_SHIFTBITS;
  const size_t offset = pos & BV_MOD_WORD;
  vector[word_index] |= (BV_ONE << (BV_MOD_WORD - offset));
}
size_t
Vector_and_first_bit(
    uint64_t** vectors,
    const size_t num_vectors,
    const size_t num_words) {
  for (size_t i = 0; i < num_words; ++i) {
    uint64_t word = vectors[0][i];
    for (size_t j = 1; j < num_vectors; ++j)
      word &= vectors[j][i];
    if (word > 0)
      return (1 + i * BV_WORDSIZE + __builtin_clzll(word));
  }
  return 0;
}
size_t
Vector_and_first_bit_256(
    uint64_t** vectors,
    const size_t num_vectors,
    const size_t num_avx_words) {
  for (size_t i = 0; i < num_avx_words; ++i) {
    const size_t addr_offset = i << 2;
    __m256i avx_word = _mm256_load_si256(
        (__m256i const*) (vectors[0] + addr_offset));
    // AND the AVX words
    for (size_t j = 1; j < num_vectors; ++j) {
      avx_word = _mm256_and_si256(
          avx_word,
          _mm256_load_si256((__m256i const*) (vectors[j] + addr_offset))
      );
    }
    // test whether resulting AVX word is not zero
    if (!_mm256_testz_si256(avx_word, avx_word)) {
      uint64_t word = _mm256_extract_epi64(avx_word, 0);
      const size_t shift = i << 8;
      if (word > 0)
        return (1 + shift + __builtin_clzll(word));
      word = _mm256_extract_epi64(avx_word, 1);
      if (word > 0)
        return (1 + shift + __builtin_clzll(word) + 64);
      word = _mm256_extract_epi64(avx_word, 2);
      if (word > 0)
        return (1 + shift + __builtin_clzll(word) + 128);
      word = _mm256_extract_epi64(avx_word, 3);
      return (1 + shift + __builtin_clzll(word) + 192);
    }
  }
  return 0;
}
size_t
Vector_and_first_bit_128(
    uint64_t** vectors,
    const size_t num_vectors,
    const size_t num_avx_words) {
  for (size_t i = 0; i < num_avx_words; ++i) {
    const size_t addr_offset = i << 1;
    __m128i avx_word = _mm_load_si128(
        (__m128i const*) (vectors[0] + addr_offset));
    // AND the SSE words
    for (size_t j = 1; j < num_vectors; ++j) {
      avx_word = _mm_and_si128(
          avx_word,
          _mm_load_si128((__m128i const*) (vectors[j] + addr_offset))
      );
    }
    // test whether resulting SSE word is not zero
    if (!_mm_test_all_zeros(avx_word, avx_word)) {
      uint64_t word = _mm_extract_epi64(avx_word, 0);
      if (word > 0)
        return (1 + (i << 7) + __builtin_clzll(word));
      word = _mm_extract_epi64(avx_word, 1);
      return (1 + (i << 7) + __builtin_clzll(word) + 64);
    }
  }
  return 0;
}
uint64_t*
make_random_vector(
    const size_t num_bits,
    const size_t probability) {
  uint64_t* vector = Vector_new(num_bits);
  for (size_t i = 0; i < num_bits; ++i) {
    const int x = rand() % 10;
    if (x >= (int) probability)
      Vector_set(vector, i);
  }
  return vector;
}
size_t
millis(
    const struct timeval* end,
    const struct timeval* start) {
  struct timeval e = *end;
  struct timeval s = *start;
  return (1000 * (e.tv_sec - s.tv_sec) + (e.tv_usec - s.tv_usec) / 1000);
}
int
main(
    int argc,
    char** argv) {
  if (argc != 6) {
    printf("Usage: %s num_vectors size num_iterations num_dimensions probability\n",
           argv[0]);
    return 1;
  }
  srand(time(NULL));
  const size_t num_vectors = atoi(argv[1]);
  const size_t size = atoi(argv[2]);
  const size_t num_iterations = atoi(argv[3]);
  const size_t num_dimensions = atoi(argv[4]);
  const size_t probability = atoi(argv[5]);
  const size_t num_words = size / 64;
  const size_t num_sse_words = num_words / 2;
  const size_t num_avx_words = num_words / 4;
  assert(num_vectors > 0);
  assert(size > 0);
  assert(num_iterations > 0);
  assert(num_dimensions > 0);

  struct timeval t1;
  gettimeofday(&t1, NULL);

  uint64_t*** vectors = (uint64_t***) malloc(sizeof(uint64_t**) * num_vectors);
  for (size_t j = 0; j < num_vectors; ++j) {
    vectors[j] = (uint64_t**) malloc(sizeof(uint64_t*) * num_dimensions);
    for (size_t i = 0; i < num_dimensions; ++i)
      vectors[j][i] = make_random_vector(size, probability);
  }

  struct timeval t2;
  gettimeofday(&t2, NULL);
  printf("Creation: %zu ms\n", millis(&t2, &t1));

  size_t* results_64  = (size_t*) malloc(sizeof(size_t) * num_vectors);
  size_t* results_128 = (size_t*) malloc(sizeof(size_t) * num_vectors);
  size_t* results_256 = (size_t*) malloc(sizeof(size_t) * num_vectors);

  gettimeofday(&t1, NULL);
  for (size_t j = 0; j < num_iterations; ++j)
    for (size_t i = 0; i < num_vectors; ++i)
      results_64[i] = Vector_and_first_bit(vectors[i], num_dimensions,
          num_words);
  gettimeofday(&t2, NULL);
  const size_t millis_64 = millis(&t2, &t1);
  printf("64 : %zu ms\n", millis_64);

  gettimeofday(&t1, NULL);
  for (size_t j = 0; j < num_iterations; ++j)
    for (size_t i = 0; i < num_vectors; ++i)
      results_128[i] = Vector_and_first_bit_128(vectors[i],
          num_dimensions, num_sse_words);
  gettimeofday(&t2, NULL);
  const size_t millis_128 = millis(&t2, &t1);
  const double factor_128 = (double) millis_64 / (double) millis_128;
  printf("128 : %zu ms (factor: %.2f)\n", millis_128, factor_128);

  gettimeofday(&t1, NULL);
  for (size_t j = 0; j < num_iterations; ++j)
    for (size_t i = 0; i < num_vectors; ++i)
      results_256[i] = Vector_and_first_bit_256(vectors[i],
          num_dimensions, num_avx_words);
  gettimeofday(&t2, NULL);
  const size_t millis_256 = millis(&t2, &t1);
  const double factor_256 = (double) millis_64 / (double) millis_256;
  printf("256 : %zu ms (factor: %.2f)\n", millis_256, factor_256);

  for (size_t i = 0; i < num_vectors; ++i) {
    if (results_64[i] != results_256[i])
      printf("ERROR: %zu (64) != %zu (256) with i = %zu\n", results_64[i],
          results_256[i], i);
    if (results_64[i] != results_128[i])
      printf("ERROR: %zu (64) != %zu (128) with i = %zu\n", results_64[i],
          results_128[i], i);
  }

  free(results_64);
  free(results_128);
  free(results_256);

  for (size_t j = 0; j < num_vectors; ++j) {
    for (size_t i = 0; i < num_dimensions; ++i)
      free(vectors[j][i]);
    free(vectors[j]);
  }
  free(vectors);

  return 0;
}
To compile:
gcc -o main main.c -O3 -Wall -Wextra -pedantic-errors -Werror -march=native -std=c99 -fno-tree-vectorize
To execute:
./main 1000 8192 50000 5 9
The parameters mean: 1000 test cases, vectors of length 8192 bits, and 50000 test repetitions (the last two parameters are minor tweaks).
Sample output for the above call on my machine:
Creation: 363 ms
64 : 15000 ms
128 : 10070 ms (factor: 1.49)
256 : 6784 ms (factor: 2.21)
If your input values are uniformly distributed, almost all of the time the highest set bit will be in the top 64 bits of the vector (only about 1 in 2^64 inputs has an all-zero top half). A branch on this condition will predict very well. @Nejc's answer is good for that case.
But many problems where lzcnt is part of the solution have a uniformly distributed output (or similar), so a branchless version has an advantage. Not strictly uniform, but anything where it's common for the highest set bit to be somewhere other than the highest 64 bits.
Wim's idea of lzcnt on a compare bitmap to find the right element is a very good approach.
However, runtime-variable indexing of the vector with a store/reload is probably better than a shuffle. Store-forwarding latency is low (maybe 5 to 7 cycles on Skylake), and that latency is in parallel with the index generation (compare / movemask / lzcnt). The movd/vpermd/movd lane-crossing shuffle strategy takes 5 cycles after the index is known to get the right element into an integer register. (See http://agner.org/optimize/)
I think this version should have better latency on Haswell/Skylake (and Ryzen), and also better throughput. (vpermd is quite slow on Ryzen, so the store/reload should be very good there.) The address calculation for the load should have similar latency to the store-forwarding, so it's a toss-up which one is actually the critical path.
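For reference, here is a minimal sketch (my code, not from the answer below) of what the movd/vpermd/movd shuffle strategy mentioned above could look like as intrinsics, assuming idx is the dword index (0..7) already computed in an integer register:

#include <stdint.h>
#include <immintrin.h>

// Hypothetical helper: extract the dword at a runtime-variable index with a
// lane-crossing shuffle instead of a store/reload.
static inline uint32_t extract_dword_var(__m256i vec, unsigned idx)
{
    __m256i vidx = _mm256_castsi128_si256(_mm_cvtsi32_si128((int)idx)); // movd: idx into lane 0
    __m256i sel  = _mm256_permutevar8x32_epi32(vec, vidx);              // vpermd: lane 0 becomes vec[idx]
    return (uint32_t)_mm256_cvtsi256_si32(sel);                         // movd: lane 0 back to an integer register
}

Those three instructions form a dependency chain that can only start once idx is ready, which is why the store/reload version can win on latency.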
Aligning the stack by 32 to avoid cache-line splits on a 32-byte store takes extra instructions, so this is best if it can inline into a function that uses it multiple times, or already needs that much alignment for some other __m256i.
#include <stdint.h>
#include <immintrin.h>

#ifndef _MSC_VER
#include <stdalign.h>  // MSVC is missing this?
#else
#include <intrin.h>
#pragma intrinsic(_BitScanReverse)  // https://msdn.microsoft.com/en-us/library/fbxyd7zd.aspx suggests this
#endif

// undefined result for mask=0, like BSR
uint32_t bsr_nonzero(uint32_t mask)
{
    // on Intel, bsr has a minor advantage for the first step
    // for AMD, BSR is slow so you should use 31-LZCNT.
    //return 31 - _lzcnt_u32(mask);

    // Intel's docs say there should be a _bit_scan_reverse(x), maybe try that with ICC
#ifdef _MSC_VER
    unsigned long tmp;
    _BitScanReverse(&tmp, mask);
    return tmp;
#else
    return 31 - __builtin_clz(mask);
#endif
}
And the interesting part:
int mm256_lzcnt_si256(__m256i vec)
{
    __m256i  nonzero_elem = _mm256_cmpeq_epi8(vec, _mm256_setzero_si256());
    unsigned mask = ~_mm256_movemask_epi8(nonzero_elem);

    if (mask == 0)
        return 256;  // if this is rare, branching is probably good.

    alignas(32)  // gcc chooses to align elems anyway, with its clunky code
    uint8_t elems[32];
    _mm256_storeu_si256((__m256i*)elems, vec);

    // unsigned lz_msk = _lzcnt_u32(mask);
    // unsigned idx = 31 - lz_msk;  // can use bsr to get the 31-x, because mask is known to be non-zero.
    // This takes the 31-x latency off the critical path, in parallel with final lzcnt
    unsigned idx = bsr_nonzero(mask);
    unsigned lz_msk = 31 - idx;
    unsigned highest_nonzero_byte = elems[idx];
    return lz_msk * 8 + _lzcnt_u32(highest_nonzero_byte) - 24;
    // lzcnt(byte)-24, because we don't want to count the leading 24 bits of padding.
}
On Godbolt with gcc7.3 -O3 -march=haswell, we get asm like this to count ymm1 into esi.
        vpxor   xmm0, xmm0, xmm0
        mov     esi, 256
        vpcmpeqd        ymm0, ymm1, ymm0
        vpmovmskb       eax, ymm0
        xor     eax, -1                     # ~mask and set flags, unlike NOT
        je      .L35
        bsr     eax, eax
        vmovdqa YMMWORD PTR [rbp-48], ymm1  # note no dependency on anything earlier; OoO exec can run it early
        mov     ecx, 31
        mov     edx, eax                    # this is redundant, gcc should just use rax later. But it's zero-latency on HSW/SKL and Ryzen.
        sub     ecx, eax
        movzx   edx, BYTE PTR [rbp-48+rdx]  # has to wait for the index in edx
        lzcnt   edx, edx
        lea     esi, [rdx-24+rcx*8]         # lzcnt(byte) + lzcnt(vectormask) * 8
.L35:
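As a quick sanity check (my addition, not part of the original answer), a tiny test driver like the one below exercises mm256_lzcnt_si256 on a few easy cases; it treats element 3 as the most-significant qword, matching the argument order of _mm256_set_epi64x:

#include <stdio.h>

int main(void)
{
    __m256i zero = _mm256_setzero_si256();                              // expect 256
    __m256i low  = _mm256_set_epi64x(0, 0, 0, 1);                       // only bit 0 set   -> expect 255
    __m256i high = _mm256_set_epi64x((long long)(1ULL << 63), 0, 0, 0); // only bit 255 set -> expect 0
    printf("%d %d %d\n", mm256_lzcnt_si256(zero),
                         mm256_lzcnt_si256(low),
                         mm256_lzcnt_si256(high));
    return 0;
}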
For finding the highest non-zero element (the 31 - lzcnt(~movemask)), we use bsr to directly get the bit (and thus byte) index, and take a subtract off the critical path. This is safe as long as we branch on the mask being zero. (A branchless version would need to initialize the register to avoid an out-of-bounds index.)
On AMD CPUs, bsr is significantly slower than lzcnt. On Intel CPUs, they're the same performance, except for minor variations in output-dependency details.
bsr with an input of zero leaves the destination register unmodified, but GCC doesn't provide a way to take advantage of that. (Intel only documents it as undefined output, but AMD documents the actual behaviour of Intel/AMD CPUs as producing the old value in the destination register.)
bsr sets ZF if the input was zero, rather than based on the output like most instructions. (This and the output dependency may be why it's slow on AMD.) Branching on the BSR flags is not particularly better than branching on ZF as set by xor eax,-1 to invert the mask, which is what gcc does. Anyway, Intel does document a _BitScanReverse(&idx, mask) intrinsic that returns a bool, but gcc doesn't support it (not even with x86intrin.h). The GNU C builtin doesn't return a boolean to let you use the flag result, but maybe gcc would make smart asm using the flag output of bsr if you check for the input C variable being non-zero.
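For illustration, here is a hedged sketch (mine, MSVC path only, not from the original answer) of how the boolean return of _BitScanReverse could fold the zero-mask branch into the bit scan itself:

#ifdef _MSC_VER
// Hypothetical MSVC-only variant of the tail of mm256_lzcnt_si256: _BitScanReverse
// returns 0 when mask == 0, so it doubles as the "whole vector is zero" test.
static int lzcnt_from_mask_msvc(unsigned mask, const uint8_t elems[32])
{
    unsigned long idx;
    if (!_BitScanReverse(&idx, mask))
        return 256;                      // no non-zero byte anywhere
    return (31 - idx) * 8 + _lzcnt_u32(elems[idx]) - 24;
}
#endif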
Using a dword (uint32_t) array and vmovmskps would let the 2nd lzcnt use a memory source operand instead of needing a movzx to zero-extend a single byte. But lzcnt has a false dependency on Intel CPUs before Skylake, so compilers might tend to load separately and use lzcnt same,same as a workaround anyway. (I didn't check.)
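Here is a sketch of that dword variant (my code, not from the original answer), reusing bsr_nonzero from above. With 32-bit elements the mask has only 8 bits, and no -24 correction is needed on the selected element's lzcnt:

// Hypothetical dword-element version: compare per 32-bit element, use vmovmskps for
// an 8-bit mask, and lzcnt the selected uint32_t straight from the spill array.
int mm256_lzcnt_si256_dwords(__m256i vec)
{
    __m256i  zero_elem = _mm256_cmpeq_epi32(vec, _mm256_setzero_si256());
    unsigned mask = ~(unsigned)_mm256_movemask_ps(_mm256_castsi256_ps(zero_elem)) & 0xFFu;

    if (mask == 0)
        return 256;                       // whole vector is zero

    alignas(32) uint32_t elems[8];
    _mm256_storeu_si256((__m256i*)elems, vec);

    unsigned idx = bsr_nonzero(mask);     // index of the highest non-zero dword (0..7)
    return (7 - idx) * 32 + _lzcnt_u32(elems[idx]);
}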
Wim's version needs lz_msk-24 because the high 24 bits are always zero with an 8-bit mask. But a 32-bit mask fills a 32-bit register. This version with 8-bit elements and a 32-bit mask is the reverse: we need to lzcnt the selected byte, not including the 24 leading zero bits in the register. So our -24 moves to a different spot, not part of the critical path for indexing the array.
gcc chooses to do it as part of a single 3-component LEA (reg + reg*scale - const), which is great for throughput, but puts it on the critical path after the final lzcnt. (It's not free because a 3-component LEA has extra latency vs. reg + reg*scale on Intel CPUs. See Agner Fog's instruction tables.)
A multiply by 8 can be done as part of an lea, but a multiply by 32 would need a shift (or be folded into two separate LEAs).
Intel's optimization manual says (Table 2-24) even Sandybridge can forward from a 256-bit store to single-byte loads without a problem, so I think it's fine on AVX2 CPUs, the same as forwarding to 32-bit loads of 4-byte-aligned chunks of the store.