Avx512F.VL.ExpandLoad Method

Definition

Overloads

Name Description
ExpandLoad(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_expandloadu_epi32 (__m128i s, __mmask8 k, void const * a)

VPEXPANDD xmm1 {k1}{z}, m128

ExpandLoad(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_expandloadu_epi32 (__m256i s, __mmask8 k, void const * a)

VPEXPANDD ymm1 {k1}{z}, m256

ExpandLoad(Int64*, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_expandloadu_epi64 (__m128i s, __mmask8 k, void const * a)

VPEXPANDQ xmm1 {k1}{z}, m128

ExpandLoad(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_expandloadu_epi64 (__m256i s, __mmask8 k, void const * a)

VPEXPANDQ ymm1 {k1}{z}, m256

ExpandLoad(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_expandloadu_epi64 (__m128i s, __mmask8 k, void const * a)

VPEXPANDQ xmm1 {k1}{z}, m128

ExpandLoad(Single*, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_expandloadu_ps (__m256 s, __mmask8 k, void const * a)

VEXPANDPS ymm1 {k1}{z}, m256

ExpandLoad(Double*, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_expandloadu_pd (__m128d s, __mmask8 k, void const * a)

VEXPANDPD xmm1 {k1}{z}, m128

ExpandLoad(Int64*, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_expandloadu_epi64 (__m256i s, __mmask8 k, void const * a)

VPEXPANDQ ymm1 {k1}{z}, m256

ExpandLoad(Int32*, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_expandloadu_epi32 (__m256i s, __mmask8 k, void const * a)

VPEXPANDD ymm1 {k1}{z}, m256

ExpandLoad(Int32*, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_expandloadu_epi32 (__m128i s, __mmask8 k, void const * a)

VPEXPANDD xmm1 {k1}{z}, m128

ExpandLoad(Double*, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_expandloadu_pd (__m256d s, __mmask8 k, void const * a)

VEXPANDPD ymm1 {k1}{z}, m256

ExpandLoad(Single*, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_expandloadu_ps (__m128 s, __mmask8 k, void const * a)

VEXPANDPS xmm1 {k1}{z}, m128

ExpandLoad(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

Source:
Avx512F.cs

__m128i _mm_mask_expandloadu_epi32 (__m128i s, __mmask8 k, void const * a)

VPEXPANDD xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<System::UInt32> ExpandLoad(System::UInt32* address, System::Runtime::Intrinsics::Vector128<System::UInt32> mask, System::Runtime::Intrinsics::Vector128<System::UInt32> merge);
public static System.Runtime.Intrinsics.Vector128<uint> ExpandLoad(uint* address, System.Runtime.Intrinsics.Vector128<uint> mask, System.Runtime.Intrinsics.Vector128<uint> merge);
static member ExpandLoad : nativeptr<uint32> * System.Runtime.Intrinsics.Vector128<uint32> * System.Runtime.Intrinsics.Vector128<uint32> -> System.Runtime.Intrinsics.Vector128<uint32>

Parameters

address
UInt32*
mask
Vector128<UInt32>
merge
Vector128<UInt32>

Returns

Vector128<UInt32>

Remarks

The native and managed intrinsics take their parameters in a different order.
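
The following sketch is not part of the reference content; it illustrates one way this overload might be called, assuming the common convention that a mask lane whose bits are all set selects that lane while a zero lane takes its value from merge (names and data are illustrative only). Note how the managed call takes (address, mask, merge) while the native intrinsic shown above takes (s, k, a).

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ExpandLoadUInt32Sketch
{
    // Requires <AllowUnsafeBlocks>true</AllowUnsafeBlocks> in the project file.
    static unsafe void Main()
    {
        if (!Avx512F.VL.IsSupported)
        {
            Console.WriteLine("AVX-512F with VL extensions is not available.");
            return;
        }

        uint[] packed = { 10, 20, 30, 40 };

        // Lanes 0 and 2 are selected (all bits set); lanes 1 and 3 keep 'merge'.
        Vector128<uint> mask  = Vector128.Create(~0u, 0u, ~0u, 0u);
        Vector128<uint> merge = Vector128.Create(1u, 2u, 3u, 4u);

        fixed (uint* p = packed)
        {
            // Consecutive values 10 and 20 are expanded into lanes 0 and 2,
            // so the result is <10, 2, 20, 4>.
            Vector128<uint> result = Avx512F.VL.ExpandLoad(p, mask, merge);
            Console.WriteLine(result);
        }
    }
}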

Applies to

ExpandLoad(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

Source:
Avx512F.cs

__m256i _mm256_mask_expandloadu_epi32 (__m256i s, __mmask8 k, void const * a)

VPEXPANDD ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<System::UInt32> ExpandLoad(System::UInt32* address, System::Runtime::Intrinsics::Vector256<System::UInt32> mask, System::Runtime::Intrinsics::Vector256<System::UInt32> merge);
public static System.Runtime.Intrinsics.Vector256<uint> ExpandLoad(uint* address, System.Runtime.Intrinsics.Vector256<uint> mask, System.Runtime.Intrinsics.Vector256<uint> merge);
static member ExpandLoad : nativeptr<uint32> * System.Runtime.Intrinsics.Vector256<uint32> * System.Runtime.Intrinsics.Vector256<uint32> -> System.Runtime.Intrinsics.Vector256<uint32>

Parameters

address
UInt32*
mask
Vector256<UInt32>
merge
Vector256<UInt32>

Returns

Vector256<UInt32>

Remarks

The native and managed intrinsics take their parameters in a different order.

Applies to

ExpandLoad(Int64*, Vector128<Int64>, Vector128<Int64>)

Source:
Avx512F.cs

__m128i _mm_mask_expandloadu_epi64 (__m128i s, __mmask8 k, void const * a)

VPEXPANDQ xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<long long> ExpandLoad(long long* address, System::Runtime::Intrinsics::Vector128<long long> mask, System::Runtime::Intrinsics::Vector128<long long> merge);
public static System.Runtime.Intrinsics.Vector128<long> ExpandLoad(long* address, System.Runtime.Intrinsics.Vector128<long> mask, System.Runtime.Intrinsics.Vector128<long> merge);
static member ExpandLoad : nativeptr<int64> * System.Runtime.Intrinsics.Vector128<int64> * System.Runtime.Intrinsics.Vector128<int64> -> System.Runtime.Intrinsics.Vector128<int64>

Parameters

address
Int64*
mask
Vector128<Int64>
merge
Vector128<Int64>

Returns

Vector128<Int64>

Remarks

The native and managed intrinsics take their parameters in a different order.

Applies to

ExpandLoad(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

Source:
Avx512F.cs

__m256i _mm256_mask_expandloadu_epi64 (__m256i s, __mmask8 k, void const * a)

VPEXPANDQ ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<System::UInt64> ExpandLoad(System::UInt64* address, System::Runtime::Intrinsics::Vector256<System::UInt64> mask, System::Runtime::Intrinsics::Vector256<System::UInt64> merge);
public static System.Runtime.Intrinsics.Vector256<ulong> ExpandLoad(ulong* address, System.Runtime.Intrinsics.Vector256<ulong> mask, System.Runtime.Intrinsics.Vector256<ulong> merge);
static member ExpandLoad : nativeptr<uint64> * System.Runtime.Intrinsics.Vector256<uint64> * System.Runtime.Intrinsics.Vector256<uint64> -> System.Runtime.Intrinsics.Vector256<uint64>

Parameters

address
UInt64*
mask
Vector256<UInt64>
merge
Vector256<UInt64>

Returns

Vector256<UInt64>

Remarks

The native and managed intrinsics take their parameters in a different order.
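
As a usage note (an observation, not stated on this page): when every mask lane is set, the expand load simply reads a full vector of consecutive elements and merge is never used. A minimal sketch with illustrative data:

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ExpandLoadUInt64Sketch
{
    static unsafe void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        ulong[] data = { 1, 2, 3, 4 };

        fixed (ulong* p = data)
        {
            // All four lanes are selected, so this behaves like an ordinary
            // unaligned load and produces <1, 2, 3, 4>.
            Vector256<ulong> result = Avx512F.VL.ExpandLoad(
                p, Vector256<ulong>.AllBitsSet, Vector256<ulong>.Zero);
            Console.WriteLine(result);
        }
    }
}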

Applies to

ExpandLoad(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

Source:
Avx512F.cs

__m128i _mm_mask_expandloadu_epi64 (__m128i s, __mmask8 k, void const * a)

VPEXPANDQ xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<System::UInt64> ExpandLoad(System::UInt64* address, System::Runtime::Intrinsics::Vector128<System::UInt64> mask, System::Runtime::Intrinsics::Vector128<System::UInt64> merge);
public static System.Runtime.Intrinsics.Vector128<ulong> ExpandLoad(ulong* address, System.Runtime.Intrinsics.Vector128<ulong> mask, System.Runtime.Intrinsics.Vector128<ulong> merge);
static member ExpandLoad : nativeptr<uint64> * System.Runtime.Intrinsics.Vector128<uint64> * System.Runtime.Intrinsics.Vector128<uint64> -> System.Runtime.Intrinsics.Vector128<uint64>

Parameters

address
UInt64*
mask
Vector128<UInt64>
merge
Vector128<UInt64>

Returns

Vector128<UInt64>

Remarks

The native and managed intrinsics take their parameters in a different order.

Applies to

ExpandLoad(Single*, Vector256<Single>, Vector256<Single>)

Source:
Avx512F.cs

__m256 _mm256_mask_expandloadu_ps (__m256 s, __mmask8 k, void const * a)

VEXPANDPS ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<float> ExpandLoad(float* address, System::Runtime::Intrinsics::Vector256<float> mask, System::Runtime::Intrinsics::Vector256<float> merge);
public static System.Runtime.Intrinsics.Vector256<float> ExpandLoad(float* address, System.Runtime.Intrinsics.Vector256<float> mask, System.Runtime.Intrinsics.Vector256<float> merge);
static member ExpandLoad : nativeptr<single> * System.Runtime.Intrinsics.Vector256<single> * System.Runtime.Intrinsics.Vector256<single> -> System.Runtime.Intrinsics.Vector256<single>

Parameters

address
Single*
mask
Vector256<Single>
merge
Vector256<Single>

Returns

Vector256<Single>

Remarks

The native and managed intrinsics take their parameters in a different order.
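
The sketch below (illustrative only, not from the reference source) builds the mask with a vector comparison so that each selected lane is all-bits-set, then expands a short run of packed floats into those lanes; unselected lanes keep the sentinel from merge. Only as many elements as there are selected lanes are expected to be read from memory.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ExpandLoadSingleSketch
{
    static unsafe void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        // Three packed values to scatter back into the lanes selected below.
        float[] packed = { 1.5f, 2.5f, 3.5f };

        // Comparison yields all-bits-set in lanes 1, 3 and 4, zero elsewhere.
        Vector256<float> keys = Vector256.Create(0f, 5f, 0f, 5f, 5f, 0f, 0f, 0f);
        Vector256<float> mask = Vector256.Equals(keys, Vector256.Create(5f));

        Vector256<float> merge = Vector256.Create(-1.0f); // sentinel for unselected lanes

        fixed (float* p = packed)
        {
            // Result: <-1, 1.5, -1, 2.5, 3.5, -1, -1, -1>
            Vector256<float> result = Avx512F.VL.ExpandLoad(p, mask, merge);
            Console.WriteLine(result);
        }
    }
}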

Applies to

ExpandLoad(Double*, Vector128<Double>, Vector128<Double>)

Source:
Avx512F.cs

__m128d _mm_mask_expandloadu_pd (__m128d s, __mmask8 k, void const * a)

VEXPANDPD xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<double> ExpandLoad(double* address, System::Runtime::Intrinsics::Vector128<double> mask, System::Runtime::Intrinsics::Vector128<double> merge);
public static System.Runtime.Intrinsics.Vector128<double> ExpandLoad(double* address, System.Runtime.Intrinsics.Vector128<double> mask, System.Runtime.Intrinsics.Vector128<double> merge);
static member ExpandLoad : nativeptr<double> * System.Runtime.Intrinsics.Vector128<double> * System.Runtime.Intrinsics.Vector128<double> -> System.Runtime.Intrinsics.Vector128<double>

Parameters

address
Double*
mask
Vector128<Double>
merge
Vector128<Double>

Returns

Vector128<Double>

Remarks

The native and managed intrinsics take their parameters in a different order.
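
One more illustrative sketch (again not part of the reference content): the zero-masking form of the instruction, VEXPANDPD with {z}, can be approximated by passing Vector128<Double>.Zero as merge, since unselected lanes take whatever merge holds.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ExpandLoadDoubleSketch
{
    static unsafe void Main()
    {
        if (!Avx512F.VL.IsSupported) return;

        double[] packed = { 42.0 };

        // Select lane 0 only; the all-bits-set pattern is built as a ulong and reinterpreted.
        Vector128<double> mask = Vector128.Create(~0UL, 0UL).AsDouble();

        fixed (double* p = packed)
        {
            // Lane 0 receives 42.0 from memory; lane 1 is zeroed because merge is zero.
            Vector128<double> result = Avx512F.VL.ExpandLoad(p, mask, Vector128<double>.Zero);
            Console.WriteLine(result);
        }
    }
}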

Applies to

ExpandLoad(Int64*, Vector256<Int64>, Vector256<Int64>)

Source:
Avx512F.cs

__m256i _mm256_mask_expandloadu_epi64 (__m256i s, __mmask8 k, void const * a)

VPEXPANDQ ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<long long> ExpandLoad(long long* address, System::Runtime::Intrinsics::Vector256<long long> mask, System::Runtime::Intrinsics::Vector256<long long> merge);
public static System.Runtime.Intrinsics.Vector256<long> ExpandLoad(long* address, System.Runtime.Intrinsics.Vector256<long> mask, System.Runtime.Intrinsics.Vector256<long> merge);
static member ExpandLoad : nativeptr<int64> * System.Runtime.Intrinsics.Vector256<int64> * System.Runtime.Intrinsics.Vector256<int64> -> System.Runtime.Intrinsics.Vector256<int64>

Parameters

address
Int64*
mask
Vector256<Int64>
merge
Vector256<Int64>

Returns

Vector256<Int64>

Remarks

The native and managed intrinsics take their parameters in a different order.

Applies to

ExpandLoad(Int32*, Vector256<Int32>, Vector256<Int32>)

Source:
Avx512F.cs

__m256i _mm256_mask_expandloadu_epi32 (__m256i s, __mmask8 k, void const * a)

VPEXPANDD ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<int> ExpandLoad(int* address, System::Runtime::Intrinsics::Vector256<int> mask, System::Runtime::Intrinsics::Vector256<int> merge);
public static System.Runtime.Intrinsics.Vector256<int> ExpandLoad(int* address, System.Runtime.Intrinsics.Vector256<int> mask, System.Runtime.Intrinsics.Vector256<int> merge);
static member ExpandLoad : nativeptr<int> * System.Runtime.Intrinsics.Vector256<int> * System.Runtime.Intrinsics.Vector256<int> -> System.Runtime.Intrinsics.Vector256<int>

Parameters

address
Int32*
mask
Vector256<Int32>
merge
Vector256<Int32>

Returns

Vector256<Int32>

Remarks

The native and managed intrinsics take their parameters in a different order.

Applies to

ExpandLoad(Int32*, Vector128<Int32>, Vector128<Int32>)

Source:
Avx512F.cs

__m128i _mm_mask_expandloadu_epi32 (__m128i s, __mmask8 k, void const * a)

VPEXPANDD xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<int> ExpandLoad(int* address, System::Runtime::Intrinsics::Vector128<int> mask, System::Runtime::Intrinsics::Vector128<int> merge);
public static System.Runtime.Intrinsics.Vector128<int> ExpandLoad(int* address, System.Runtime.Intrinsics.Vector128<int> mask, System.Runtime.Intrinsics.Vector128<int> merge);
static member ExpandLoad : nativeptr<int> * System.Runtime.Intrinsics.Vector128<int> * System.Runtime.Intrinsics.Vector128<int> -> System.Runtime.Intrinsics.Vector128<int>

Parameters

address
Int32*
mask
Vector128<Int32>
merge
Vector128<Int32>

Returns

Vector128<Int32>

Remarks

The native and managed intrinsics take their parameters in a different order.

Applies to

ExpandLoad(Double*, Vector256<Double>, Vector256<Double>)

Source:
Avx512F.cs

__m256d _mm256_mask_expandloadu_pd (__m256d s, __mmask8 k, void const * a)

VEXPANDPD ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<double> ExpandLoad(double* address, System::Runtime::Intrinsics::Vector256<double> mask, System::Runtime::Intrinsics::Vector256<double> merge);
public static System.Runtime.Intrinsics.Vector256<double> ExpandLoad(double* address, System.Runtime.Intrinsics.Vector256<double> mask, System.Runtime.Intrinsics.Vector256<double> merge);
static member ExpandLoad : nativeptr<double> * System.Runtime.Intrinsics.Vector256<double> * System.Runtime.Intrinsics.Vector256<double> -> System.Runtime.Intrinsics.Vector256<double>

Parameters

address
Double*
mask
Vector256<Double>
merge
Vector256<Double>

Returns

Vector256<Double>

Remarks

The native and managed intrinsics take their parameters in a different order.

Applies to

ExpandLoad(Single*, Vector128<Single>, Vector128<Single>)

Source:
Avx512F.cs

__m128 _mm_mask_expandloadu_ps (__m128 s, __mmask8 k, void const * a)

VEXPANDPS xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<float> ExpandLoad(float* address, System::Runtime::Intrinsics::Vector128<float> mask, System::Runtime::Intrinsics::Vector128<float> merge);
public static System.Runtime.Intrinsics.Vector128<float> ExpandLoad(float* address, System.Runtime.Intrinsics.Vector128<float> mask, System.Runtime.Intrinsics.Vector128<float> merge);
static member ExpandLoad : nativeptr<single> * System.Runtime.Intrinsics.Vector128<single> * System.Runtime.Intrinsics.Vector128<single> -> System.Runtime.Intrinsics.Vector128<single>

Parameters

address
Single*
mask
Vector128<Single>
merge
Vector128<Single>

Returns

Vector128<Single>

Remarks

The native and managed intrinsics take their parameters in a different order.

Applies to