@@ -7,8 +7,10 @@
 
 #if GLM_ARCH & GLM_ARCH_SSE2_BIT
 
-//mad
-GLM_FUNC_QUALIFIER __m128 glm_f32v1_mad(__m128 a, __m128 b, __m128 c)
+typedef __m128 glm_vec4;
+typedef __m128i glm_ivec4;
+
+GLM_FUNC_QUALIFIER __m128 glm_vec1_fma(__m128 a, __m128 b, __m128 c)
 {
 #	if GLM_ARCH & GLM_ARCH_AVX2_BIT
 		return _mm_fmadd_ss(a, b, c);
@@ -17,8 +19,7 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v1_mad(__m128 a, __m128 b, __m128 c)
 #	endif
 }
 
-//mad
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_mad(__m128 a, __m128 b, __m128 c)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_fma(__m128 a, __m128 b, __m128 c)
 {
 #	if GLM_ARCH & GLM_ARCH_AVX2_BIT
 		return _mm_fmadd_ps(a, b, c);
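Note on the hunk above: glm_vec4_fma computes a * b + c for all four lanes, using the AVX2 fused multiply-add when it is available. The # else branch is outside this hunk, so the standalone sketch below is not taken from the patch; it only illustrates the usual two-instruction SSE fallback for the same expression and a sample call.

#include <immintrin.h>
#include <stdio.h>

int main(void)
{
	__m128 const a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);	/* lanes, low to high: 1 2 3 4 */
	__m128 const b = _mm_set1_ps(2.0f);
	__m128 const c = _mm_set1_ps(1.0f);

	/* Without FMA hardware, a * b + c is typically formed with two instructions: */
	__m128 const r = _mm_add_ps(_mm_mul_ps(a, b), c);

	float out[4];
	_mm_storeu_ps(out, r);
	printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);	/* prints 3 5 7 9 */
	return 0;
}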
@@ -27,13 +28,12 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_mad(__m128 a, __m128 b, __m128 c)
 #	endif
 }
 
-//abs
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_abs(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_abs(__m128 x)
 {
 	return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)));
 }
 
-GLM_FUNC_QUALIFIER __m128i glm_i32v4_abs(__m128i x)
+GLM_FUNC_QUALIFIER __m128i glm_ivec4_abs(__m128i x)
 {
 #	if GLM_ARCH & GLM_ARCH_SSSE3_BIT
 		return _mm_sign_epi32(x, x);
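In the hunk above, the float glm_vec4_abs clears the IEEE 754 sign bit by masking with 0x7FFFFFFF, and the integer version uses _mm_sign_epi32(x, x) when SSSE3 is present. The pre-SSSE3 branch is not visible here; the snippet below is only an illustration of the standard SSE2 shift/xor/subtract idiom for integer abs, not a quote from the patch.

#include <emmintrin.h>
#include <stdio.h>

/* Illustrative SSE2-only integer abs: sgn is 0 for non-negative lanes and
   0xFFFFFFFF for negative ones, so (x ^ sgn) - sgn negates exactly the
   negative lanes (two's complement: ~x + 1). */
static __m128i ivec4_abs_sse2(__m128i x)
{
	__m128i const sgn = _mm_srai_epi32(x, 31);
	return _mm_sub_epi32(_mm_xor_si128(x, sgn), sgn);
}

int main(void)
{
	__m128i const v = _mm_set_epi32(-7, 0, 42, -1);	/* lanes, low to high: -1 42 0 -7 */
	int out[4];
	_mm_storeu_si128((__m128i*)out, ivec4_abs_sse2(v));
	printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);	/* prints 1 42 0 7 */
	return 0;
}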
@@ -45,8 +45,7 @@ GLM_FUNC_QUALIFIER __m128i glm_i32v4_abs(__m128i x)
 #	endif
 }
 
-//sign
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_sgn(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_sign(__m128 x)
 {
 	__m128 const zro0 = _mm_setzero_ps();
 	__m128 const cmp0 = _mm_cmplt_ps(x, zro0);
@@ -57,8 +56,7 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_sgn(__m128 x)
 	return or0;
 }
 
-//round
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_rnd(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_round(__m128 x)
 {
 #	if GLM_ARCH & GLM_ARCH_SSE41_BIT
 		return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT);
@@ -72,13 +70,12 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_rnd(__m128 x)
 #	endif
 }
 
-//floor
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_flr(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_floor(__m128 x)
 {
 #	if GLM_ARCH & GLM_ARCH_SSE41_BIT
 		return _mm_floor_ps(x);
 #	else
-		__m128 const rnd0 = glm_f32v4_rnd(x);
+		__m128 const rnd0 = glm_vec4_round(x);
 		__m128 const cmp0 = _mm_cmplt_ps(x, rnd0);
 		__m128 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
 		__m128 const sub0 = _mm_sub_ps(rnd0, and0);
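The pre-SSE4.1 branch of glm_vec4_floor above derives floor from round: wherever rounding went up (x < rnd0), the comparison mask is converted to 1.0f and subtracted back; the glm_vec4_ceil hunk further down mirrors this with _mm_cmpgt_ps and an add. The scalar restatement below is for reference only and uses nearbyintf as a stand-in for glm_vec4_round.

#include <math.h>
#include <stdio.h>

/* Scalar restatement of the SSE fallback: round to nearest, then correct by
   one where the rounding overshot in the wrong direction. */
static float floor_from_round(float x)
{
	float const r = nearbyintf(x);		/* plays the role of glm_vec4_round   */
	return r - ((x < r) ? 1.0f : 0.0f);	/* cmplt / and / sub in the vector code */
}

static float ceil_from_round(float x)
{
	float const r = nearbyintf(x);
	return r + ((x > r) ? 1.0f : 0.0f);	/* cmpgt / and / add in the vector code */
}

int main(void)
{
	printf("%g %g\n", floor_from_round(1.7f), ceil_from_round(-1.7f));	/* prints 1 -1 */
	return 0;
}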
@@ -87,14 +84,14 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_flr(__m128 x)
 }
 
 /* trunc TODO
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_trc(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_trunc(__m128 x)
 {
 	return __m128();
 }
 */
 
 //roundEven
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_rde(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_roundEven(__m128 x)
 {
 	__m128 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
 	__m128 const and0 = _mm_and_ps(sgn0, x);
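The visible start of glm_vec4_roundEven only isolates the sign bit of x; the rest of the function lies outside the hunk. The classic technique this sets up is to add and then subtract a sign-matched 2^23 so that the FPU's round-to-nearest-even does the work, since floats of that magnitude carry no fraction bits. The scalar sketch below illustrates that trick under the stated assumptions and is not quoted from the patch.

#include <stdio.h>

/* Magic-number round-to-nearest-even: adding +/- 2^23 (matching x's sign)
   pushes the value into a range where floats have no fraction bits, so the
   addition itself rounds to nearest-even; subtracting recovers the result.
   Assumes |x| < 2^23 and the default rounding mode. */
static float round_even_sketch(float x)
{
	float const magic = (x < 0.0f) ? -8388608.0f : 8388608.0f;
	volatile float t = x + magic;	/* volatile: keep the compiler from folding at higher precision */
	return t - magic;
}

int main(void)
{
	printf("%g %g %g\n", round_even_sketch(2.5f), round_even_sketch(3.5f), round_even_sketch(-2.5f));	/* prints 2 4 -2 */
	return 0;
}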
@@ -104,12 +101,12 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_rde(__m128 x)
 	return sub0;
 }
 
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_ceil(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_ceil(__m128 x)
 {
 #	if GLM_ARCH & GLM_ARCH_SSE41_BIT
 		return _mm_ceil_ps(x);
 #	else
-		__m128 const rnd0 = glm_f32v4_rnd(x);
+		__m128 const rnd0 = glm_vec4_round(x);
 		__m128 const cmp0 = _mm_cmpgt_ps(x, rnd0);
 		__m128 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
 		__m128 const add0 = _mm_add_ps(rnd0, and0);
@@ -117,51 +114,51 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_ceil(__m128 x)
 #	endif
 }
 
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_frc(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_fract(__m128 x)
 {
-	__m128 const flr0 = glm_f32v4_flr(x);
+	__m128 const flr0 = glm_vec4_floor(x);
 	__m128 const sub0 = _mm_sub_ps(x, flr0);
 	return sub0;
 }
 
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_mod(__m128 x, __m128 y)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_mod(__m128 x, __m128 y)
 {
 	__m128 const div0 = _mm_div_ps(x, y);
-	__m128 const flr0 = glm_f32v4_flr(div0);
+	__m128 const flr0 = glm_vec4_floor(div0);
 	__m128 const mul0 = _mm_mul_ps(y, flr0);
 	__m128 const sub0 = _mm_sub_ps(x, mul0);
 	return sub0;
 }
 
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_clp(__m128 v, __m128 minVal, __m128 maxVal)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_clamp(__m128 v, __m128 minVal, __m128 maxVal)
 {
 	__m128 const min0 = _mm_min_ps(v, maxVal);
 	__m128 const max0 = _mm_max_ps(min0, minVal);
 	return max0;
 }
 
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_mix(__m128 v1, __m128 v2, __m128 a)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_mix(__m128 v1, __m128 v2, __m128 a)
 {
 	__m128 const sub0 = _mm_sub_ps(_mm_set1_ps(1.0f), a);
 	__m128 const mul0 = _mm_mul_ps(v1, sub0);
-	__m128 const mad0 = glm_f32v4_mad(v2, a, mul0);
+	__m128 const mad0 = glm_vec4_fma(v2, a, mul0);
 	return mad0;
 }
 
 //step
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_stp(__m128 edge, __m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_step(__m128 edge, __m128 x)
 {
 	__m128 const cmp = _mm_cmple_ps(x, edge);
 	return _mm_movemask_ps(cmp) == 0 ? _mm_set1_ps(1.0f) : _mm_setzero_ps();
 }
 
 // smoothstep
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_ssp(__m128 edge0, __m128 edge1, __m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_smoothstep(__m128 edge0, __m128 edge1, __m128 x)
 {
 	__m128 const sub0 = _mm_sub_ps(x, edge0);
 	__m128 const sub1 = _mm_sub_ps(edge1, edge0);
 	__m128 const div0 = _mm_sub_ps(sub0, sub1);
-	__m128 const clp0 = glm_f32v4_clp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f));
+	__m128 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f));
 	__m128 const mul0 = _mm_mul_ps(_mm_set1_ps(2.0f), clp0);
 	__m128 const sub2 = _mm_sub_ps(_mm_set1_ps(3.0f), mul0);
 	__m128 const mul1 = _mm_mul_ps(clp0, clp0);
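Two details in the hunk above are worth noting. glm_vec4_step collapses the comparison with _mm_movemask_ps, so it yields an all-1.0f or all-0.0f vector rather than a per-component step. In glm_vec4_smoothstep, div0 is formed with _mm_sub_ps, whereas the GLSL definition divides by (edge1 - edge0). The scalar reference versions below follow the GLSL definitions and are given for comparison only; they are not taken from the patch.

#include <stdio.h>

/* Scalar reference versions of GLSL step() and smoothstep(). */
static float step_ref(float edge, float x)
{
	return (x < edge) ? 0.0f : 1.0f;
}

static float clamp_ref(float v, float lo, float hi)
{
	return (v < lo) ? lo : (v > hi) ? hi : v;
}

static float smoothstep_ref(float edge0, float edge1, float x)
{
	float const t = clamp_ref((x - edge0) / (edge1 - edge0), 0.0f, 1.0f);
	return t * t * (3.0f - 2.0f * t);
}

int main(void)
{
	printf("%g %g\n", step_ref(0.5f, 0.25f), smoothstep_ref(0.0f, 1.0f, 0.5f));	/* prints 0 0.5 */
	return 0;
}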
@@ -170,7 +167,7 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_ssp(__m128 edge0, __m128 edge1, __m128 x)
 }
 
 // Agner Fog method
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_nan(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_nan(__m128 x)
 {
 	__m128i const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
 	__m128i const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
@@ -184,7 +181,7 @@ GLM_FUNC_QUALIFIER __m128 glm_f32v4_nan(__m128 x)
 }
 
 // Agner Fog method
-GLM_FUNC_QUALIFIER __m128 glm_f32v4_inf(__m128 x)
+GLM_FUNC_QUALIFIER __m128 glm_vec4_inf(__m128 x)
 {
 	__m128i const t1 = _mm_castps_si128(x);						// reinterpret as 32-bit integer
 	__m128i const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1));	// shift out sign bit
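glm_vec4_nan and glm_vec4_inf begin identically: reinterpret the floats as 32-bit integers and shift left by one to discard the sign bit. The remainder of each function is outside these hunks, but the idea credited to Agner Fog in the comments is that, after the shift, infinity becomes exactly 0xFF000000 and NaNs become strictly larger unsigned values. The scalar sketch below only illustrates that comparison; it is not part of the patch.

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* After shifting the sign bit out, the exponent occupies the top 8 bits:
   0xFF000000 with a zero mantissa is infinity; anything above it is a NaN. */
static uint32_t shifted_bits(float f)
{
	uint32_t u;
	memcpy(&u, &f, sizeof u);	/* reinterpret, like _mm_castps_si128                    */
	return u << 1;			/* like _mm_sll_epi32(t1, _mm_cvtsi32_si128(1))            */
}

static int is_nan_bits(float f) { return shifted_bits(f) >  0xFF000000u; }
static int is_inf_bits(float f) { return shifted_bits(f) == 0xFF000000u; }

int main(void)
{
	printf("%d %d %d\n", is_nan_bits(nanf("")), is_inf_bits(INFINITY), is_nan_bits(1.0f));	/* prints 1 1 0 */
	return 0;
}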