nissy-core

The "engine" of nissy, including the H48 optimal solver.
git clone https://git.tronto.net/nissy-core

avx2.h (10514B)


#define CO2_AVX2 _mm256_set_epi64x(0, 0, 0, INT64_C(0x6060606060606060))
#define COCW_AVX2 _mm256_set_epi64x(0, 0, 0, INT64_C(0x2020202020202020))
#define CP_AVX2 _mm256_set_epi64x(0, 0, 0, INT64_C(0x0707070707070707))
#define EP_AVX2 _mm256_set_epi64x(\
    INT64_C(0x0F0F0F0F), INT64_C(0x0F0F0F0F0F0F0F0F), 0, 0)
#define EO_AVX2 _mm256_set_epi64x(\
    INT64_C(0x10101010), INT64_C(0x1010101010101010), 0, 0)
#define ORIENT_AVX2 _mm256_set_epi64x(INT64_C(0x10101010), \
    INT64_C(0x1010101010101010), 0, INT64_C(0x6060606060606060))
#define USED_AVX2 _mm256_set_epi64x(INT64_C(0x00000000FFFFFFFF), \
    INT64_C(0xFFFFFFFFFFFFFFFF), 0, INT64_C(0xFFFFFFFFFFFFFFFF))
#define CARRY_AVX2 _mm256_set_epi64x(INT64_C(0x20202020), \
    INT64_C(0x2020202020202020), 0, INT64_C(0x6060606060606060))
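
/*
 * Note (added for exposition): the masks above imply the byte layout of
 * cube_t. Bytes 0-7 hold the corners and bytes 16-27 hold the edges; the
 * remaining bytes are unused (see USED_AVX2). In each corner byte, bits
 * 0-2 store the permutation index (CP_AVX2) and bits 5-6 the orientation
 * (CO2_AVX2); in each edge byte, bits 0-3 store the permutation index
 * (EP_AVX2) and bit 4 the orientation (EO_AVX2).
 */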

#define SOLVED_L INT64_C(0x0706050403020100)
#define SOLVED_H INT64_C(0x0B0A0908)

#define STATIC_CUBE(c_ufr, c_ubl, c_dfl, c_dbr, c_ufl, c_ubr, c_dfr, c_dbl, \
    e_uf, e_ub, e_db, e_df, e_ur, e_ul, e_dl, e_dr, e_fr, e_fl, e_bl, e_br) \
    _mm256_set_epi8(0, 0, 0, 0, e_br, e_bl, e_fl, e_fr, \
        e_dr, e_dl, e_ul, e_ur, e_df, e_db, e_ub, e_uf, \
        0, 0, 0, 0, 0, 0, 0, 0, \
        c_dbl, c_dfr, c_ubr, c_ufl, c_dbr, c_dfl, c_ubl, c_ufr)
#define ZERO_CUBE _mm256_set_epi64x(0, 0, 0, 0)
#define SOLVED_CUBE _mm256_set_epi64x(SOLVED_H, SOLVED_L, 0, SOLVED_L)
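
/*
 * Since pieces are numbered in the order of the macro's parameters, the
 * solved cube can equivalently be written as
 *
 *     STATIC_CUBE(0, 1, 2, 3, 4, 5, 6, 7,
 *         0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
 */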

STATIC_INLINE uint64_t permtoindex_Nx8(uint64_t, int64_t);
STATIC_INLINE int64_t indextoperm_8x8(uint64_t);
STATIC_INLINE int64_t indextoperm_4x8(uint64_t);

/* Forward declarations for functions used before their definition below */
STATIC_INLINE cube_t compose(cube_t, cube_t);
STATIC_INLINE void copy_corners(cube_t [static 1], cube_t);
STATIC_INLINE void copy_edges(cube_t [static 1], cube_t);

STATIC_INLINE int
popcount_u32(uint32_t x)
{
	return _mm_popcnt_u32(x);
}

STATIC void
pieces(cube_t cube[static 1], uint8_t c[static 8], uint8_t e[static 12])
{
	uint8_t aux[32];

	/* Corners live in bytes 0-7, edges in bytes 16-27 */
	_mm256_storeu_si256((__m256i *)aux, *cube);
	memcpy(c, aux, 8);
	memcpy(e, aux+16, 12);
}

STATIC_INLINE bool
equal(cube_t c1, cube_t c2)
{
	int32_t mask;
	__m256i cmp;

	/* The movemask is all ones iff all 32 bytes compare equal */
	cmp = _mm256_cmpeq_epi8(c1, c2);
	mask = _mm256_movemask_epi8(cmp);

	return mask == ~0;
}

STATIC_INLINE cube_t
invertco(cube_t c)
{
	cube_t co, shleft, shright, summed, newco, cleanco, ret;

	/*
	 * Inverting a corner swaps clockwise and counter-clockwise twists.
	 * With the orientation in bits 5-6, shifting it one bit left and one
	 * bit right and OR-ing the results swaps the values 1 and 2 while
	 * leaving 0 unchanged.
	 */
	co = _mm256_and_si256(c, CO2_AVX2);
	shleft = _mm256_slli_epi32(co, 1);
	shright = _mm256_srli_epi32(co, 1);
	summed = _mm256_or_si256(shleft, shright);
	newco = _mm256_and_si256(summed, CO2_AVX2);
	cleanco = _mm256_xor_si256(c, co);
	ret = _mm256_or_si256(cleanco, newco);

	return ret;
}

STATIC_INLINE cube_t
compose_edges(cube_t c1, cube_t c2)
{
	return compose(c1, c2);
}

STATIC_INLINE cube_t
compose_corners(cube_t c1, cube_t c2)
{
	return compose(c1, c2);
}

STATIC_INLINE cube_t
compose(cube_t c1, cube_t c2)
{
	/*
	 * Method taken from Andrew Skalski's vcube (thanks to Arhan Chaudhary
	 * for pointing this out)
	 */
	cube_t ss, so, su;

	/* Permute */
	ss = _mm256_shuffle_epi8(c1, c2);

	/*
	 * Orient: add the orientations, then reduce them modulo 3 (corners)
	 * or modulo 2 (edges). If the sum overflows, subtracting CARRY_AVX2
	 * yields the reduced value; otherwise the subtraction wraps around
	 * and the unsigned minimum keeps the original sum.
	 */
	so = _mm256_and_si256(c2, ORIENT_AVX2);
	ss = _mm256_add_epi8(ss, so);
	su = _mm256_sub_epi8(ss, CARRY_AVX2);
	ss = _mm256_min_epu8(ss, su);

	return _mm256_and_si256(ss, USED_AVX2);
}
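
/*
 * Worked example of the carry trick in compose(): take a corner with
 * orientation 2 (stored as 0x40 in bits 5-6) in both c1 and c2. Then:
 *
 *     ss = 0x40 + 0x40 = 0x80
 *     su = 0x80 - 0x60 = 0x20
 *     min_epu8(0x80, 0x20) = 0x20, i.e. CO = 1 = (2+2) mod 3
 *
 * If instead the sum had stayed below 0x60, su would have wrapped around
 * to a large value and the minimum would have kept ss.
 */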

STATIC_INLINE cube_t
inverse(cube_t c)
{
	/* Method taken from Andrew Skalski's vcube[1]. The addition sequence
	 * was generated using [2].
	 * [1] https://github.com/Voltara/vcube
	 * [2] http://wwwhomes.uni-bielefeld.de/achim/addition_chain.html
	 */
	cube_t v3, vi, vo, vp, ret;

	v3 = _mm256_shuffle_epi8(c, c);
	v3 = _mm256_shuffle_epi8(v3, c);
	vi = _mm256_shuffle_epi8(v3, v3);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, v3);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, c);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, v3);
	vi = _mm256_shuffle_epi8(vi, vi);
	vi = _mm256_shuffle_epi8(vi, c);

	vo = _mm256_and_si256(c, ORIENT_AVX2);
	vo = _mm256_shuffle_epi8(vo, vi);
	vp = _mm256_andnot_si256(ORIENT_AVX2, vi);
	ret = _mm256_or_si256(vp, vo);
	ret = _mm256_and_si256(ret, USED_AVX2);

	return invertco(ret);
}
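
/*
 * Note on the chain above: each shuffle composes permutations, so the
 * sequence computes c^27719 on the permutation part. Since every
 * permutation of at most 12 elements has order dividing
 * lcm(1, ..., 12) = 27720, this equals c^-1. The orientations are then
 * moved to their inverse positions via vi, and invertco() swaps the
 * clockwise and counter-clockwise corner twists.
 */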

STATIC_INLINE uint64_t
coord_co(cube_t c)
{
	cube_t co;
	uint64_t mem[4], ret, i, p;

	co = _mm256_and_si256(c, CO2_AVX2);
	_mm256_storeu_si256((__m256i *)mem, co);

	/* Read the orientation of the first 7 corners as a base-3 number */
	mem[0] >>= 5;
	for (i = 0, ret = 0, p = 1; i < 7; i++, mem[0] >>= 8, p *= 3)
		ret += (mem[0] & 3) * p;

	return ret;
}
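
/*
 * In other words, coord_co() computes the sum of CO_i * 3^i for
 * i = 0, ..., 6. The orientation of the last corner is not encoded: the
 * total twist is always 0 modulo 3, so invcoord_co() below can
 * reconstruct it from the other seven.
 */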

STATIC_INLINE cube_t
invcoord_co(uint64_t coord)
{
	uint64_t i, c, p, co, mem[4] = {0};
	cube_t cube, cc;

	/* The last corner gets the twist that makes the total 0 mod 3 */
	for (i = 0, p = 0, c = coord; i < 8; i++, c /= 3) {
		co = i == 7 ? ((3 - (p % 3)) % 3) : (c % 3);
		p += co;
		mem[0] |= (uint64_t)(i + (co << COSHIFT)) << (uint64_t)(8 * i);
	}

	cc = _mm256_loadu_si256((__m256i *)mem);
	cube = SOLVED_CUBE;
	copy_corners(&cube, cc);

	return cube;
}

STATIC_INLINE uint64_t
coord_csep(cube_t c)
{
	cube_t cp, shifted;
	int mask;

	cp = _mm256_and_si256(c, CP_AVX2);
	shifted = _mm256_slli_epi32(cp, 5);
	mask = _mm256_movemask_epi8(shifted);

	return (uint64_t)(mask & 0x7F);
}
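
/*
 * Here the shift by 5 moves bit 2 of each corner byte to bit 7, where
 * the movemask picks it up: bit i of the coordinate tells whether corner
 * i is occupied by a piece from the second group (4-7). Only the first 7
 * corners are kept (mask & 0x7F), since the last one is determined by
 * the other seven.
 */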

STATIC_INLINE uint64_t
coord_cocsep(cube_t c)
{
	/* Combined coordinate: co in the high bits, the 7-bit csep below */
	return (coord_co(c) << UINT8_C(7)) + coord_csep(c);
}

STATIC_INLINE uint64_t
coord_eo(cube_t c)
{
	cube_t eo, shifted;
	int mask;

	eo = _mm256_and_si256(c, EO_AVX2);
	shifted = _mm256_slli_epi32(eo, 3);
	mask = _mm256_movemask_epi8(shifted);

	/*
	 * Edges start at byte 16, and the first edge is skipped: its
	 * orientation is determined by the other 11, since the total number
	 * of flipped edges is always even.
	 */
	return (uint64_t)(mask >> 17);
}

STATIC_INLINE uint64_t
coord_esep(cube_t c)
{
	cube_t ep;
	uint64_t e, mem[4], i, j, jj, k, l, ret1, ret2, bit1, bit2, is1;

	ep = _mm256_and_si256(c, EP_AVX2);
	_mm256_storeu_si256((__m256i *)mem, ep);

	mem[3] <<= 8;
	ret1 = ret2 = 0;
	k = l = 4;
	for (i = 0, j = 0; i < 12; i++, mem[i/8 + 2] >>= 8) {
		e = mem[i/8 + 2];

		/* bit2 is set for edges 8-11, bit1 alone for edges 4-7 */
		bit1 = (e & ESEPBIT_1) >> 2;
		bit2 = (e & ESEPBIT_2) >> 3;
		is1 = (1 - bit2) * bit1;

		ret1 += bit2 * binomial[11-i][k];
		k -= bit2;

		jj = j < 8;
		ret2 += jj * is1 * binomial[7-(j*jj)][l];
		l -= is1;
		j += (1-bit2);
	}

	return ret1 * 70 + ret2;
}
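
/*
 * The result combines two binomial coordinates: ret1 encodes which 4 of
 * the 12 positions hold edges 8-11 (C(12,4) = 495 values) and ret2
 * encodes which 4 of the remaining 8 positions hold edges 4-7
 * (C(8,4) = 70 values), hence the final ret1 * 70 + ret2.
 */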

STATIC_INLINE cube_t
invcoord_esep(uint64_t esep)
{
	cube_t eee, ret;
	uint8_t mem[32] = {0};

	invcoord_esep_array(esep % UINT64_C(70), esep / UINT64_C(70), mem+16);

	ret = SOLVED_CUBE;
	eee = _mm256_loadu_si256((__m256i *)mem);
	copy_edges(&ret, eee);

	return ret;
}

STATIC_INLINE void
copy_corners(cube_t dest[static 1], cube_t src)
{
	/* Take the low 128 bits (corners) from src */
	*dest = _mm256_blend_epi32(*dest, src, 0x0F);
}

STATIC_INLINE void
copy_edges(cube_t dest[static 1], cube_t src)
{
	/* Take the high 128 bits (edges) from src */
	*dest = _mm256_blend_epi32(*dest, src, 0xF0);
}

STATIC_INLINE void
set_eo(cube_t cube[static 1], uint64_t eo)
{
	uint64_t eo12, eotop, eobot;
	__m256i veo;

	/*
	 * Extend the 11-bit coordinate to all 12 edges: the first edge gets
	 * the parity bit, so that the total number of flipped edges is even.
	 * Then scatter bit i of eo12 to bit 4 (EOSHIFT) of edge byte i.
	 */
	eo12 = (eo << 1) + (_mm_popcnt_u64(eo) % 2);
	eotop = (eo12 & (1 << 11)) << 17 |
		(eo12 & (1 << 10)) << 10 |
		(eo12 & (1 << 9)) << 3 |
		(eo12 & (1 << 8)) >> 4;
	eobot = (eo12 & (1 << 7)) << 53 |
		(eo12 & (1 << 6)) << 46 |
		(eo12 & (1 << 5)) << 39 |
		(eo12 & (1 << 4)) << 32 |
		(eo12 & (1 << 3)) << 25 |
		(eo12 & (1 << 2)) << 18 |
		(eo12 & (1 << 1)) << 11 |
		(eo12 & 1) << 4;
	veo = _mm256_set_epi64x(eotop, eobot, 0, 0);

	*cube = _mm256_andnot_si256(EO_AVX2, *cube);
	*cube = _mm256_or_si256(*cube, veo);
}
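
/*
 * By construction, set_eo() and coord_eo() round-trip: coord_eo() reads
 * back the orientation bits of edges 1-11, which are exactly the 11 bits
 * passed to set_eo().
 */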

STATIC_INLINE uint64_t
permtoindex_Nx8(uint64_t n, int64_t a)
{
	uint64_t i, c, ret;
	__m64 cmp;

	/*
	 * Lehmer code: for each entry, count how many of the following
	 * entries are smaller. Consumed positions are refilled with 0x0F,
	 * which never compares smaller than a valid entry.
	 */
	for (i = 0, ret = 0; i < n; i++) {
		cmp = _mm_set1_pi8(a & INT64_C(0xFF));
		a = (a >> INT64_C(8)) | INT64_C(0x0F00000000000000);
		cmp = _mm_cmpgt_pi8(cmp, _mm_cvtsi64_m64(a));
		c = _mm_popcnt_u64(_mm_cvtm64_si64(cmp)) >> UINT64_C(3);
		ret += c * factorial[n-1-i];
	}

	return ret;
}
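
/*
 * Worked example: the permutation (2, 0, 1, 3), stored little-endian as
 * 0x03010002, has two later entries smaller than its first entry and
 * none smaller than the others, so permtoindex_Nx8(4, ...) returns
 * 2 * 3! = 12. This matches entry [12] in the indextoperm_4x8() table
 * below.
 */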

STATIC_INLINE int64_t
indextoperm_8x8(uint64_t p)
{
	int used;
	uint64_t c, k, i, j, ret;

	for (i = 0, ret = 0, used = 0; i < 8; i++) {
		k = p / factorial[7-i];

		/* Find k-th unused number */
		for (j = 0, c = 0; c <= k; j++)
			c += 1 - ((used & (1 << j)) >> j);

		ret |= (j-1) << (8*i);
		used |= 1 << (j-1);
		p %= factorial[7-i];
	}

	return ret;
}

STATIC_INLINE int64_t
indextoperm_4x8(uint64_t p)
{
	static const int64_t A[FACT_4] = {
		[0] = INT64_C(0x03020100),
		[1] = INT64_C(0x02030100),
		[2] = INT64_C(0x03010200),
		[3] = INT64_C(0x01030200),
		[4] = INT64_C(0x02010300),
		[5] = INT64_C(0x01020300),
		[6] = INT64_C(0x03020001),
		[7] = INT64_C(0x02030001),
		[8] = INT64_C(0x03000201),
		[9] = INT64_C(0x00030201),
		[10] = INT64_C(0x02000301),
		[11] = INT64_C(0x00020301),
		[12] = INT64_C(0x03010002),
		[13] = INT64_C(0x01030002),
		[14] = INT64_C(0x03000102),
		[15] = INT64_C(0x00030102),
		[16] = INT64_C(0x01000302),
		[17] = INT64_C(0x00010302),
		[18] = INT64_C(0x02010003),
		[19] = INT64_C(0x01020003),
		[20] = INT64_C(0x02000103),
		[21] = INT64_C(0x00020103),
		[22] = INT64_C(0x01000203),
		[23] = INT64_C(0x00010203),
	};

	return A[p];
}

STATIC_INLINE uint64_t
coord_cp(cube_t cube)
{
	cube_t cp;
	int64_t aux[4];

	cp = _mm256_and_si256(cube, CP_AVX2);
	_mm256_storeu_si256((__m256i *)aux, cp);

	/* aux[0] holds the 8 corner bytes */
	return permtoindex_Nx8(8, aux[0]);
}

STATIC_INLINE cube_t
invcoord_cp(uint64_t i)
{
	return _mm256_set_epi64x(SOLVED_H, SOLVED_L, 0, indextoperm_8x8(i));
}

STATIC_INLINE uint64_t
coord_epud(cube_t cube)
{
	cube_t ep;
	int64_t aux[4];

	ep = _mm256_and_si256(cube, EP_AVX2);
	_mm256_storeu_si256((__m256i *)aux, ep);

	/* aux[2] holds the 8 U/D-layer edge bytes */
	return permtoindex_Nx8(8, aux[2]);
}

STATIC_INLINE cube_t
invcoord_epud(uint64_t i)
{
	return _mm256_set_epi64x(SOLVED_H, indextoperm_8x8(i), 0, SOLVED_L);
}

STATIC_INLINE uint64_t
coord_epe(cube_t cube)
{
	cube_t ep;
	int64_t aux[4];

	/* XOR with 8 maps the E-slice edge indices 8-11 down to 0-3 */
	ep = _mm256_and_si256(cube, EP_AVX2);
	ep = _mm256_xor_si256(ep, _mm256_set1_epi8(8));
	_mm256_storeu_si256((__m256i *)aux, ep);

	return permtoindex_Nx8(4, aux[3]);
}

STATIC_INLINE cube_t
invcoord_epe(uint64_t i)
{
	int64_t a;
	__m64 a64;

	/* Add 8 back to each byte to restore the edge indices 8-11 */
	a = indextoperm_4x8(i);
	a64 = _mm_add_pi8(_mm_cvtsi64_m64(a), _mm_set_pi32(0, 0x08080808));
	a = _mm_cvtm64_si64(a64);

	return _mm256_set_epi64x(a, SOLVED_L, 0, SOLVED_L);
}

STATIC_INLINE bool
is_eo_even(cube_t cube)
{
	uint32_t mask;
	__m256i e;

	/*
	 * Move each EO bit to bit 7 and collect them with a movemask; the
	 * orientation is even iff an even number of edges is flipped.
	 */
	e = _mm256_and_si256(cube, EO_AVX2);
	e = _mm256_slli_epi16(e, 7-EOSHIFT);
	mask = _mm256_movemask_epi8(e);

	return popcount_u32(mask) % 2 == 0;
}
    442 
    443 STATIC_INLINE uint64_t
    444 coord_epudsep(cube_t cube)
    445 {
    446 	uint8_t aux[32];
    447 
    448 	_mm256_storeu_si256((__m256i *)aux, cube);
    449 	return coord_epudsep_array(aux + 16);
    450 }
    451 
    452 STATIC_INLINE cube_t
    453 invcoord_epudsep(uint64_t i)
    454 {
    455 	cube_t cube, elow;
    456 	uint8_t e[32] = {0};
    457 
    458 	invcoord_epudsep_array(i, e+16);
    459 	elow = _mm256_loadu_si256((__m256i *)e);
    460 	cube = _mm256_set_epi64x(SOLVED_H, 0, 0, SOLVED_L);
    461 
    462 	return _mm256_or_si256(elow, cube);
    463 }