core.simd
Source: core/simd.d
- template Vector(T)
  Create a vector type.
  Parameters:
    T = one of double[2], float[4], void[16], byte[16], ubyte[16], short[8], ushort[8], int[4], uint[4], long[2], ulong[2]. For 256 bit vectors, one of double[4], float[8], void[32], byte[32], ubyte[32], short[16], ushort[16], int[8], uint[8], long[4], ulong[4].
- alias void16 = __vector(void[16]);
- alias double2 = __vector(double[2]);
- alias float4 = __vector(float[4]);
- alias byte16 = __vector(byte[16]);
- alias ubyte16 = __vector(ubyte[16]);
- alias short8 = __vector(short[8]);
- alias ushort8 = __vector(ushort[8]);
- alias int4 = __vector(int[4]);
- alias uint4 = __vector(uint[4]);
- alias long2 = __vector(long[2]);
- alias ulong2 = __vector(ulong[2]);
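  These aliases behave like ordinary D types and support element-wise arithmetic directly. A minimal sketch (assuming a target with SIMD support; the .array property exposes the underlying static array):

    import core.simd;

    void example()
    {
        float4 a = 1.0f;                      // broadcast 1.0f to all four lanes
        float4 b = [1.0f, 2.0f, 3.0f, 4.0f];  // per-lane initialization
        float4 c = a + b;                     // element-wise addition
        assert(c.array[2] == 4.0f);
    }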
- enum XMM : int;
  XMM opcodes that conform to the following: opcode xmm1, xmm2/mem, and do not have side effects (i.e. do not write to memory).
- pure @safe V1 simd(XMM opcode, V1, V2)(V1 op1, V2 op2)
  if (is(V1 == __vector) && is(V2 == __vector));
  Generate a two operand instruction with XMM 128 bit operands. This is a compiler magic function - it doesn't behave like regular D functions.
  Parameters:
    opcode = any of the XMM opcodes; it must be a compile time constant
    op1 = first operand
    op2 = second operand
  Returns: result of opcode
- pure @safe V1 simd(XMM opcode, V1)(V1 op1)
  if (is(V1 == __vector));
  Unary SIMD instructions.
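  A minimal sketch of both forms, assuming an x86 target where version (D_SIMD) is set:

    import core.simd;

    version (D_SIMD)
    pure @safe float4 addAndSqrt(float4 a, float4 b)
    {
        float4 sum = simd!(XMM.ADDPS)(a, b); // two operand form: per-lane a + b
        return simd!(XMM.SQRTPS)(sum);       // unary form: per-lane square root
    }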
- pure @safe V1 simd(XMM opcode, V1)(double d)
  if (is(V1 == __vector));
- pure @safe V1 simd(XMM opcode, V1)(float f)
  if (is(V1 == __vector));
- pure @safe V1 simd(XMM opcode, ubyte imm8, V1, V2)(V1 op1, V2 op2)
  if (is(V1 == __vector) && is(V2 == __vector));
  For instructions: CMPPD, CMPSS, CMPSD, CMPPS, PSHUFD, PSHUFHW, PSHUFLW, BLENDPD, BLENDPS, DPPD, DPPS, MPSADBW, PBLENDW, ROUNDPD, ROUNDPS, ROUNDSD, ROUNDSS.
  Parameters:
    opcode = any of the above XMM opcodes; it must be a compile time constant
    op1 = first operand
    op2 = second operand
    imm8 = third operand; must be a compile time constant
  Returns: result of opcode
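  For example, PSHUFD can reverse the lanes of an int4; the imm8 selects one source element per destination lane, two bits each (a sketch, assuming version (D_SIMD) is set):

    import core.simd;

    version (D_SIMD)
    pure @safe int4 reverseLanes(int4 a)
    {
        // imm8 0b00_01_10_11 picks source lanes 3, 2, 1, 0
        return simd!(XMM.PSHUFD, 0b00_01_10_11)(a, a);
    }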
- pure @safe V1 simd(XMM opcode, ubyte imm8, V1)(V1 op1)
  if (is(V1 == __vector));
  For instructions with the imm8 version: PSLLD, PSLLQ, PSLLW, PSRAD, PSRAW, PSRLD, PSRLQ, PSRLW, PSRLDQ, PSLLDQ.
  Parameters:
    opcode = any of the above XMM opcodes; it must be a compile time constant
    op1 = first operand
    imm8 = second operand; must be a compile time constant
  Returns: result of opcode
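  For example, a per-lane left shift by a compile time constant (a sketch, assuming version (D_SIMD) is set):

    import core.simd;

    version (D_SIMD)
    pure @safe int4 times4(int4 a)
    {
        // PSLLD with imm8 = 2 shifts each 32-bit lane left by 2
        return simd!(XMM.PSLLD, 2)(a);
    }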
- @safe V1 simd_sto(XMM opcode, V1, V2)(V1 op1, V2 op2)
  if (is(V1 == __vector) && is(V2 == __vector));
  For "store" operations of the form: op1 op= op2.
  Returns: op2
  These cannot be marked as pure, as semantic() doesn't check them.
- @safe V1 simd_stod(XMM opcode, V1, V2)(double op1, V1 op2)
  if (is(V1 == __vector));
- @safe V1 simd_stof(XMM opcode, V1)(float op1, V1 op2)
  if (is(V1 == __vector));
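  A sketch of a store through simd_sto, patterned on the __simd_sto examples in the D language specification; the STOUPS pseudo-opcode (unaligned MOVUPS store) and the cast are assumptions to verify against your compiler:

    import core.simd;

    // Hypothetical sketch: store v to possibly unaligned memory at p.
    // The dereferenced cast pointer is the memory operand being stored to.
    void storeups(float* p, float4 v) @system
    {
        simd_sto!(XMM.STOUPS)(*cast(void16*) p, v);
    }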
- void prefetch(bool writeFetch, ubyte locality)(const(void)* address);
  Emit prefetch instruction.
  Parameters:
    address = address to be prefetched
    writeFetch = true for write fetch, false for read fetch
    locality = 0..3 (0 meaning least local, 3 meaning most local)
  Note: The Intel mappings are:
    writeFetch  locality  Instruction
    false       0         prefetchnta
    false       1         prefetcht2
    false       2         prefetcht1
    false       3         prefetcht0
    true        0         prefetchw
    true        1         prefetchw
    true        2         prefetchw
    true        3         prefetchw
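  For example, a read prefetch into all cache levels (prefetcht0 on Intel):

    import core.simd;

    void touch(const(void)* p)
    {
        // writeFetch = false (read fetch), locality = 3 (most local)
        prefetch!(false, 3)(p);
    }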
- V loadUnaligned(V)(const V* p)
  if (is(V == void16) || is(V == byte16) || is(V == ubyte16) || is(V == short8) || is(V == ushort8) || is(V == int4) || is(V == uint4) || is(V == long2) || is(V == ulong2));
  Load unaligned vector from address. This is a compiler intrinsic.
  Parameters:
    p = pointer to vector
  Returns: vector
- V storeUnaligned(V)(V* p, V value)
  if (is(V == void16) || is(V == byte16) || is(V == ubyte16) || is(V == short8) || is(V == ushort8) || is(V == int4) || is(V == uint4) || is(V == long2) || is(V == ulong2));
  Store vector to unaligned address. This is a compiler intrinsic.
  Parameters:
    p = pointer to vector
    value = value to store
  Returns: value
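  A minimal sketch of a round trip through unaligned memory (note the constraints above admit the integer and void vector types, not float4 or double2):

    import core.simd;

    void roundTrip()
    {
        int[8] src = [1, 2, 3, 4, 5, 6, 7, 8];
        int[8] dst;
        // &src[1] is deliberately not 16-byte aligned
        int4 v = loadUnaligned(cast(const(int4)*) &src[1]);
        storeUnaligned(cast(int4*) &dst[1], v);
        assert(dst[1 .. 5] == [2, 3, 4, 5]);
    }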