#if GLM_ARCH & GLM_ARCH_SSE2_BIT
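
// glm_vec4 / glm_ivec4 below alias the raw SSE registers __m128 / __m128i so the
// C-style helpers read like the vec4 API; the typedefs add no state or alignment
// beyond what the intrinsic types already guarantee.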

typedef __m128 glm_vec4;
typedef __m128i glm_ivec4;

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_add(glm_vec4 a, glm_vec4 b)
{
	return _mm_add_ps(a, b);
}
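
// Naming convention: glm_vec4_* helpers use the packed _ps intrinsics and operate
// on all four lanes, while the glm_vec1_* variants use the scalar _ss intrinsics,
// which combine only the lowest lane and pass lanes 1-3 of the first operand through.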

GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_add(glm_vec4 a, glm_vec4 b)
{
	return _mm_add_ss(a, b);
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sub(glm_vec4 a, glm_vec4 b)
{
	return _mm_sub_ps(a, b);
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_sub(glm_vec4 a, glm_vec4 b)
{
	return _mm_sub_ss(a, b);
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mul(glm_vec4 a, glm_vec4 b)
{
	return _mm_mul_ps(a, b);
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_mul(glm_vec4 a, glm_vec4 b)
{
	return _mm_mul_ss(a, b);
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_div(glm_vec4 a, glm_vec4 b)
{
	return _mm_div_ps(a, b);
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_div(glm_vec4 a, glm_vec4 b)
{
	return _mm_div_ss(a, b);
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_div_lowp(glm_vec4 a, glm_vec4 b)
{
	return glm_vec4_mul(a, _mm_rcp_ps(b));
}
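
// _mm_rcp_ps returns a reciprocal approximation with roughly 12 bits of mantissa
// precision, so the multiply above trades accuracy for speed; that is the "lowp"
// in the name. A Newton-Raphson refinement step on the estimate would recover most
// of the lost precision if a middle ground were ever needed.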

GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_fma(glm_vec4 a, glm_vec4 b, glm_vec4 c)
{
#	if GLM_ARCH & GLM_ARCH_AVX2_BIT
		return _mm_fmadd_ss(a, b, c);
#	else
		return _mm_add_ss(_mm_mul_ss(a, b), c);
#	endif
}
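
// With AVX2, _mm_fmadd_ss / _mm_fmadd_ps compute a * b + c in one instruction with
// a single rounding; the fallback multiplies then adds, rounding twice, so the two
// paths can differ in the last bit.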

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fma(glm_vec4 a, glm_vec4 b, glm_vec4 c)
{
#	if GLM_ARCH & GLM_ARCH_AVX2_BIT
		return _mm_fmadd_ps(a, b, c);
#	else
		return glm_vec4_add(glm_vec4_mul(a, b), c);
#	endif
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_abs(glm_vec4 x)
{
	return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)));
}
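
// IEEE-754 floats keep their sign in the top bit only, so ANDing with 0x7FFFFFFF
// clears it and yields abs(x) for every input, including -0.0f.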

GLM_FUNC_QUALIFIER glm_ivec4 glm_ivec4_abs(glm_ivec4 x)
{
#	if GLM_ARCH & GLM_ARCH_SSSE3_BIT
		return _mm_sign_epi32(x, x);
#	else
		glm_ivec4 const sgn0 = _mm_srai_epi32(x, 31);
		glm_ivec4 const inv0 = _mm_xor_si128(x, sgn0);
		glm_ivec4 const sub0 = _mm_sub_epi32(inv0, sgn0);
		return sub0;
#	endif
}
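
// SSE2 fallback: sgn0 = x >> 31 (arithmetic shift) is all 1s for negative lanes
// and all 0s otherwise, so (x ^ sgn0) - sgn0 conditionally applies two's-complement
// negation, i.e. abs(x). As with scalar abs, INT_MIN maps to itself.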

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sign(glm_vec4 x)
{
	glm_vec4 const zro0 = _mm_setzero_ps();
	glm_vec4 const cmp0 = _mm_cmplt_ps(x, zro0);
	glm_vec4 const cmp1 = _mm_cmpgt_ps(x, zro0);
	glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(-1.0f));
	glm_vec4 const and1 = _mm_and_ps(cmp1, _mm_set1_ps(1.0f));
	glm_vec4 const or0 = _mm_or_ps(and0, and1);
	return or0;
}
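
// Each comparison produces an all-1s lane mask which, ANDed with -1.0f or 1.0f,
// selects that constant; lanes equal to zero match neither compare and fall
// through as 0.0f, matching GLSL sign().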

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_round(glm_vec4 x)
{
#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
		return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT);
#	else
		glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
		glm_vec4 const and0 = _mm_and_ps(sgn0, x);
		glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
		glm_vec4 const add0 = glm_vec4_add(x, or0);
		glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
		return sub0;
#	endif
}
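
// SSE2 fallback: adding and then subtracting 2^23 (8388608.0f, the first float
// whose ulp is 1.0) forces the fraction bits out through rounding; copying the
// sign of x onto the constant makes the trick valid for negative inputs. Under the
// default MXCSR round-to-nearest-even mode ties resolve to even, and the identity
// only holds for |x| < 2^23.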

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_floor(glm_vec4 x)
{
#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
		return _mm_floor_ps(x);
#	else
		glm_vec4 const rnd0 = glm_vec4_round(x);
		glm_vec4 const cmp0 = _mm_cmplt_ps(x, rnd0);
		glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
		glm_vec4 const sub0 = glm_vec4_sub(rnd0, and0);
		return sub0;
#	endif
}
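
// Round, then correct: where rounding went up (x < rnd0), the mask selects 1.0f
// and it is subtracted back off, giving floor(x). glm_vec4_ceil below mirrors this
// with the opposite compare and an add.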

/* trunc TODO
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_trunc(glm_vec4 x)
{
	return glm_vec4();
}
*/
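
// A possible implementation for the TODO above (a sketch, not part of the original
// source): with SSE4.1, _mm_round_ps(x, _MM_FROUND_TO_ZERO) truncates directly; on
// plain SSE2 it could be composed from the helpers in this file as
// glm_vec4_mul(glm_vec4_sign(x), glm_vec4_floor(glm_vec4_abs(x))).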

//roundEven
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_roundEven(glm_vec4 x)
{
	glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
	glm_vec4 const and0 = _mm_and_ps(sgn0, x);
	glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
	glm_vec4 const add0 = glm_vec4_add(x, or0);
	glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
	return sub0;
}
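
// Same 2^23 trick as glm_vec4_round's fallback: the hardware's default
// round-to-nearest-even mode provides exactly the tie-breaking roundEven() asks
// for, so no SSE4.1 branch is needed here.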

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_ceil(glm_vec4 x)
{
#	if GLM_ARCH & GLM_ARCH_SSE41_BIT
		return _mm_ceil_ps(x);
#	else
		glm_vec4 const rnd0 = glm_vec4_round(x);
		glm_vec4 const cmp0 = _mm_cmpgt_ps(x, rnd0);
		glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
		glm_vec4 const add0 = glm_vec4_add(rnd0, and0);
		return add0;
#	endif
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fract(glm_vec4 x)
{
	glm_vec4 const flr0 = glm_vec4_floor(x);
	glm_vec4 const sub0 = glm_vec4_sub(x, flr0);
	return sub0;
}
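
// fract(x) = x - floor(x), always in [0, 1) for finite inputs.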

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mod(glm_vec4 x, glm_vec4 y)
{
	glm_vec4 const div0 = glm_vec4_div(x, y);
	glm_vec4 const flr0 = glm_vec4_floor(div0);
	glm_vec4 const mul0 = glm_vec4_mul(y, flr0);
	glm_vec4 const sub0 = glm_vec4_sub(x, mul0);
	return sub0;
}
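
// GLSL semantics: mod(x, y) = x - y * floor(x / y); unlike C's fmod, the result
// takes the sign of y.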

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_clamp(glm_vec4 v, glm_vec4 minVal, glm_vec4 maxVal)
{
	glm_vec4 const min0 = _mm_min_ps(v, maxVal);
	glm_vec4 const max0 = _mm_max_ps(min0, minVal);
	return max0;
}

GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mix(glm_vec4 v1, glm_vec4 v2, glm_vec4 a)
{
	glm_vec4 const sub0 = glm_vec4_sub(_mm_set1_ps(1.0f), a);
	glm_vec4 const mul0 = glm_vec4_mul(v1, sub0);
	glm_vec4 const mad0 = glm_vec4_fma(v2, a, mul0);
	return mad0;
}
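
// Linear interpolation as v1 * (1 - a) + v2 * a, with the second product and the
// add fused through glm_vec4_fma where the hardware allows.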

//step
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_step(glm_vec4 edge, glm_vec4 x)
{
	glm_vec4 const cmp = _mm_cmple_ps(edge, x);
	return _mm_and_ps(cmp, _mm_set1_ps(1.0f));
}
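
// The compare mask ANDed with 1.0f yields 1.0f where edge <= x and 0.0f elsewhere,
// giving GLSL's componentwise step().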

// smoothstep
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_smoothstep(glm_vec4 edge0, glm_vec4 edge1, glm_vec4 x)
{
	glm_vec4 const sub0 = glm_vec4_sub(x, edge0);
	glm_vec4 const sub1 = glm_vec4_sub(edge1, edge0);
	glm_vec4 const div0 = glm_vec4_div(sub0, sub1);
	glm_vec4 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f));
	glm_vec4 const mul0 = glm_vec4_mul(_mm_set1_ps(2.0f), clp0);
	glm_vec4 const sub2 = glm_vec4_sub(_mm_set1_ps(3.0f), mul0);
	glm_vec4 const mul1 = glm_vec4_mul(clp0, clp0);
	glm_vec4 const mul2 = glm_vec4_mul(mul1, sub2);
	return mul2;
}
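
// Computes t = clamp((x - edge0) / (edge1 - edge0), 0, 1), then the Hermite
// polynomial t * t * (3 - 2 * t). As in GLSL, results are undefined when
// edge0 == edge1, where the division produces infinities or NaNs.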

// Agner Fog method
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_nan(glm_vec4 x)
{
	glm_ivec4 const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
	glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
	glm_ivec4 const t3 = _mm_set1_epi32(0xFF000000);				// exponent mask
	glm_ivec4 const t4 = _mm_and_si128(t2, t3);						// exponent
	glm_ivec4 const t5 = _mm_andnot_si128(t3, t2);					// fraction
	glm_ivec4 const Equal = _mm_cmpeq_epi32(t3, t4);				// exponent == all 1s
	glm_ivec4 const Zero = _mm_cmpeq_epi32(t5, _mm_setzero_si128());// fraction == 0
	glm_ivec4 const And = _mm_andnot_si128(Zero, Equal);			// exponent = all 1s and fraction != 0
	return _mm_castsi128_ps(And);
}
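
// A float is NaN when its biased exponent is all 1s and its fraction is non-zero;
// shifting the sign bit out first makes the test independent of the sign, so both
// positive and negative NaN patterns match. glm_vec4_inf below matches the
// remaining exponent-all-1s, fraction-zero encodings.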

// Agner Fog method
GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_inf(glm_vec4 x)
{
	glm_ivec4 const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
	glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
	return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(0xFF000000)));	// exponent is all 1s, fraction is 0
}