summaryrefslogtreecommitdiff
path: root/vendor/fixedbitset/src/block
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/fixedbitset/src/block')
-rw-r--r--vendor/fixedbitset/src/block/avx.rs92
-rw-r--r--vendor/fixedbitset/src/block/avx2.rs88
-rw-r--r--vendor/fixedbitset/src/block/default.rs70
-rw-r--r--vendor/fixedbitset/src/block/mod.rs114
-rw-r--r--vendor/fixedbitset/src/block/sse2.rs104
-rw-r--r--vendor/fixedbitset/src/block/wasm.rs80
6 files changed, 548 insertions, 0 deletions
diff --git a/vendor/fixedbitset/src/block/avx.rs b/vendor/fixedbitset/src/block/avx.rs
new file mode 100644
index 00000000..33ba100f
--- /dev/null
+++ b/vendor/fixedbitset/src/block/avx.rs
@@ -0,0 +1,92 @@
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
+
/// A 256-bit bitset block backed by an AVX vector.
///
/// AVX (without AVX2) has no 256-bit integer bitwise ops, so the block is
/// stored as `__m256d` and manipulated with the floating-point bitwise
/// intrinsics (`_mm256_and_pd` etc.), which operate on the raw bits.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) __m256d);

impl Block {
    /// Returns `true` when every bit in the block is zero.
    #[inline]
    pub fn is_empty(self) -> bool {
        unsafe {
            // Reinterpret the lanes as integers; `_mm256_testz_si256(v, v)`
            // returns 1 iff `v & v` (i.e. `v` itself) has no bits set.
            let value = _mm256_castpd_si256(self.0);
            _mm256_testz_si256(value, value) == 1
        }
    }

    /// Returns `self & !other`.
    ///
    /// Note the operand order: `_mm256_andnot_pd(a, b)` computes `!a & b`,
    /// so `other` is passed first.
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        unsafe { Self(_mm256_andnot_pd(other.0, self.0)) }
    }
}
+
+impl Not for Block {
+ type Output = Block;
+ #[inline]
+ fn not(self) -> Self::Output {
+ unsafe { Self(_mm256_xor_pd(self.0, Self::ALL.0)) }
+ }
+}
+
+impl BitAnd for Block {
+ type Output = Block;
+ #[inline]
+ fn bitand(self, other: Self) -> Self::Output {
+ unsafe { Self(_mm256_and_pd(self.0, other.0)) }
+ }
+}
+
+impl BitAndAssign for Block {
+ #[inline]
+ fn bitand_assign(&mut self, other: Self) {
+ unsafe {
+ self.0 = _mm256_and_pd(self.0, other.0);
+ }
+ }
+}
+
+impl BitOr for Block {
+ type Output = Block;
+ #[inline]
+ fn bitor(self, other: Self) -> Self::Output {
+ unsafe { Self(_mm256_or_pd(self.0, other.0)) }
+ }
+}
+
+impl BitOrAssign for Block {
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ unsafe {
+ self.0 = _mm256_or_pd(self.0, other.0);
+ }
+ }
+}
+
+impl BitXor for Block {
+ type Output = Block;
+ #[inline]
+ fn bitxor(self, other: Self) -> Self::Output {
+ unsafe { Self(_mm256_xor_pd(self.0, other.0)) }
+ }
+}
+
+impl BitXorAssign for Block {
+ #[inline]
+ fn bitxor_assign(&mut self, other: Self) {
+ unsafe { self.0 = _mm256_xor_pd(self.0, other.0) }
+ }
+}
+
+impl PartialEq for Block {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ unsafe {
+ let new = _mm256_xor_pd(self.0, other.0);
+ let neq = _mm256_castpd_si256(new);
+ _mm256_testz_si256(neq, neq) == 1
+ }
+ }
+}
diff --git a/vendor/fixedbitset/src/block/avx2.rs b/vendor/fixedbitset/src/block/avx2.rs
new file mode 100644
index 00000000..b3593773
--- /dev/null
+++ b/vendor/fixedbitset/src/block/avx2.rs
@@ -0,0 +1,88 @@
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
+
/// A 256-bit bitset block backed by an AVX2 integer vector.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) __m256i);

impl Block {
    /// Returns `true` when every bit in the block is zero.
    /// `_mm256_testz_si256(v, v)` returns 1 iff `v & v` (i.e. `v`) is all zeros.
    #[inline]
    pub fn is_empty(self) -> bool {
        unsafe { _mm256_testz_si256(self.0, self.0) == 1 }
    }

    /// Returns `self & !other`.
    ///
    /// Note the operand order: `_mm256_andnot_si256(a, b)` computes `!a & b`,
    /// so `other` is passed first.
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        Self(unsafe { _mm256_andnot_si256(other.0, self.0) })
    }
}
+
+impl Not for Block {
+ type Output = Block;
+ #[inline]
+ fn not(self) -> Self::Output {
+ unsafe { Self(_mm256_xor_si256(self.0, Self::ALL.0)) }
+ }
+}
+
+impl BitAnd for Block {
+ type Output = Block;
+ #[inline]
+ fn bitand(self, other: Self) -> Self::Output {
+ unsafe { Self(_mm256_and_si256(self.0, other.0)) }
+ }
+}
+
+impl BitAndAssign for Block {
+ #[inline]
+ fn bitand_assign(&mut self, other: Self) {
+ unsafe {
+ self.0 = _mm256_and_si256(self.0, other.0);
+ }
+ }
+}
+
+impl BitOr for Block {
+ type Output = Block;
+ #[inline]
+ fn bitor(self, other: Self) -> Self::Output {
+ unsafe { Self(_mm256_or_si256(self.0, other.0)) }
+ }
+}
+
+impl BitOrAssign for Block {
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ unsafe {
+ self.0 = _mm256_or_si256(self.0, other.0);
+ }
+ }
+}
+
+impl BitXor for Block {
+ type Output = Block;
+ #[inline]
+ fn bitxor(self, other: Self) -> Self::Output {
+ unsafe { Self(_mm256_xor_si256(self.0, other.0)) }
+ }
+}
+
+impl BitXorAssign for Block {
+ #[inline]
+ fn bitxor_assign(&mut self, other: Self) {
+ unsafe { self.0 = _mm256_xor_si256(self.0, other.0) }
+ }
+}
+
+impl PartialEq for Block {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ unsafe {
+ let neq = _mm256_xor_si256(self.0, other.0);
+ _mm256_testz_si256(neq, neq) == 1
+ }
+ }
+}
diff --git a/vendor/fixedbitset/src/block/default.rs b/vendor/fixedbitset/src/block/default.rs
new file mode 100644
index 00000000..7fc460fb
--- /dev/null
+++ b/vendor/fixedbitset/src/block/default.rs
@@ -0,0 +1,70 @@
+use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
+
/// Scalar fallback bitset block: a single machine word, used when no SIMD
/// extension (SSE2/AVX/AVX2/wasm simd128) is enabled at build time.
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) usize);

impl Block {
    /// Returns `true` when every bit in the block is zero.
    #[inline]
    pub const fn is_empty(self) -> bool {
        self.0 == Self::NONE.0
    }

    /// Returns `self & !other`, mirroring the SIMD backends' `andnot`.
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        Self(!other.0 & self.0)
    }
}
+
+impl Not for Block {
+ type Output = Block;
+ #[inline]
+ fn not(self) -> Self::Output {
+ Self(self.0.not())
+ }
+}
+
+impl BitAnd for Block {
+ type Output = Block;
+ #[inline]
+ fn bitand(self, other: Self) -> Self::Output {
+ Self(self.0.bitand(other.0))
+ }
+}
+
+impl BitAndAssign for Block {
+ #[inline]
+ fn bitand_assign(&mut self, other: Self) {
+ self.0.bitand_assign(other.0);
+ }
+}
+
+impl BitOr for Block {
+ type Output = Block;
+ #[inline]
+ fn bitor(self, other: Self) -> Self::Output {
+ Self(self.0.bitor(other.0))
+ }
+}
+
+impl BitOrAssign for Block {
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ self.0.bitor_assign(other.0)
+ }
+}
+
+impl BitXor for Block {
+ type Output = Block;
+ #[inline]
+ fn bitxor(self, other: Self) -> Self::Output {
+ Self(self.0.bitxor(other.0))
+ }
+}
+
+impl BitXorAssign for Block {
+ #[inline]
+ fn bitxor_assign(&mut self, other: Self) {
+ self.0.bitxor_assign(other.0)
+ }
+}
diff --git a/vendor/fixedbitset/src/block/mod.rs b/vendor/fixedbitset/src/block/mod.rs
new file mode 100644
index 00000000..ae7c2220
--- /dev/null
+++ b/vendor/fixedbitset/src/block/mod.rs
@@ -0,0 +1,114 @@
+#![allow(clippy::undocumented_unsafe_blocks)]
+#![allow(dead_code)]
+// TODO: Remove once the transmutes are fixed
+#![allow(unknown_lints)]
+#![allow(clippy::missing_transmute_annotations)]
+
+use core::cmp::Ordering;
+use core::hash::{Hash, Hasher};
+
// Exactly one of the following backends is compiled in, selected at build
// time from the enabled target features. Each module exposes an identical
// `Block` API over a different register width; the `not(...)` guards keep
// the cfg predicates mutually exclusive.

// Scalar fallback (one `usize` per block): no SIMD feature enabled.
#[cfg(all(
    not(all(target_family = "wasm", target_feature = "simd128")),
    not(target_feature = "sse2"),
    not(target_feature = "avx"),
    not(target_feature = "avx2"),
))]
mod default;
#[cfg(all(
    not(all(target_family = "wasm", target_feature = "simd128")),
    not(target_feature = "sse2"),
    not(target_feature = "avx"),
    not(target_feature = "avx2"),
))]
pub use self::default::*;

// x86/x86_64 with SSE2 but neither AVX flavor: 128-bit integer blocks.
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    target_feature = "sse2",
    not(target_feature = "avx"),
    not(target_feature = "avx2"),
))]
mod sse2;
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    target_feature = "sse2",
    not(target_feature = "avx"),
    not(target_feature = "avx2"),
))]
pub use self::sse2::*;

// AVX without AVX2: 256-bit blocks via floating-point bitwise intrinsics.
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    target_feature = "avx",
    not(target_feature = "avx2")
))]
mod avx;
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    target_feature = "avx",
    not(target_feature = "avx2")
))]
pub use self::avx::*;

// AVX2: 256-bit blocks with native integer intrinsics.
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    target_feature = "avx2"
))]
mod avx2;
#[cfg(all(
    any(target_arch = "x86", target_arch = "x86_64"),
    target_feature = "avx2"
))]
pub use self::avx2::*;

// WebAssembly with the simd128 proposal: 128-bit `v128` blocks.
#[cfg(all(target_family = "wasm", target_feature = "simd128"))]
mod wasm;
#[cfg(all(target_family = "wasm", target_feature = "simd128"))]
pub use self::wasm::*;
+
impl Block {
    /// Number of `usize` words packed into one `Block`.
    pub const USIZE_COUNT: usize = core::mem::size_of::<Self>() / core::mem::size_of::<usize>();
    /// The block with every bit cleared.
    pub const NONE: Self = Self::from_usize_array([0; Self::USIZE_COUNT]);
    /// The block with every bit set.
    pub const ALL: Self = Self::from_usize_array([usize::MAX; Self::USIZE_COUNT]);
    /// Number of bits in one `Block`.
    pub const BITS: usize = core::mem::size_of::<Self>() * 8;

    /// Reinterprets the block's raw bits as an array of `usize` words.
    ///
    /// SAFETY note (review): the transmute relies on the backend's inner type
    /// having the same size as `[usize; USIZE_COUNT]` — guaranteed by the
    /// definition of `USIZE_COUNT` above — and on every bit pattern being a
    /// valid `usize`.
    #[inline]
    pub fn into_usize_array(self) -> [usize; Self::USIZE_COUNT] {
        unsafe { core::mem::transmute(self.0) }
    }

    /// Builds a block from raw `usize` words; inverse of `into_usize_array`.
    /// Same size-equality reasoning as above applies to this transmute.
    #[inline]
    pub const fn from_usize_array(array: [usize; Self::USIZE_COUNT]) -> Self {
        Self(unsafe { core::mem::transmute(array) })
    }
}
+
+impl Eq for Block {}
+
+impl PartialOrd for Block {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for Block {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.into_usize_array().cmp(&other.into_usize_array())
+ }
+}
+
+impl Default for Block {
+ #[inline]
+ fn default() -> Self {
+ Self::NONE
+ }
+}
+
+impl Hash for Block {
+ #[inline]
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ Hash::hash_slice(&self.into_usize_array(), hasher);
+ }
+}
diff --git a/vendor/fixedbitset/src/block/sse2.rs b/vendor/fixedbitset/src/block/sse2.rs
new file mode 100644
index 00000000..6db08f7e
--- /dev/null
+++ b/vendor/fixedbitset/src/block/sse2.rs
@@ -0,0 +1,104 @@
+#![allow(clippy::undocumented_unsafe_blocks)]
+
+#[cfg(target_arch = "x86")]
+use core::arch::x86::*;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64::*;
+use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
+
/// A 128-bit bitset block backed by an SSE2 integer vector.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) __m128i);

impl Block {
    /// Returns `true` when every bit in the block is zero.
    #[inline]
    pub fn is_empty(self) -> bool {
        // Plain SSE2 has no single "test all zeros" instruction, so fall
        // back to the `PartialEq` comparison against the all-zero block.
        #[cfg(not(target_feature = "sse4.1"))]
        {
            self == Self::NONE
        }
        // With SSE4.1, `_mm_test_all_zeros(v, v)` returns 1 iff `v & v`
        // (i.e. `v` itself) has no bits set.
        #[cfg(target_feature = "sse4.1")]
        {
            unsafe { _mm_test_all_zeros(self.0, self.0) == 1 }
        }
    }

    /// Returns `self & !other`.
    ///
    /// Note the operand order: `_mm_andnot_si128(a, b)` computes `!a & b`,
    /// so `other` is passed first.
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        Self(unsafe { _mm_andnot_si128(other.0, self.0) })
    }
}
+
+impl Not for Block {
+ type Output = Block;
+ #[inline]
+ fn not(self) -> Self::Output {
+ unsafe { Self(_mm_xor_si128(self.0, Self::ALL.0)) }
+ }
+}
+
+impl BitAnd for Block {
+ type Output = Block;
+ #[inline]
+ fn bitand(self, other: Self) -> Self::Output {
+ unsafe { Self(_mm_and_si128(self.0, other.0)) }
+ }
+}
+
+impl BitAndAssign for Block {
+ #[inline]
+ fn bitand_assign(&mut self, other: Self) {
+ unsafe {
+ self.0 = _mm_and_si128(self.0, other.0);
+ }
+ }
+}
+
+impl BitOr for Block {
+ type Output = Block;
+ #[inline]
+ fn bitor(self, other: Self) -> Self::Output {
+ unsafe { Self(_mm_or_si128(self.0, other.0)) }
+ }
+}
+
+impl BitOrAssign for Block {
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ unsafe {
+ self.0 = _mm_or_si128(self.0, other.0);
+ }
+ }
+}
+
+impl BitXor for Block {
+ type Output = Block;
+ #[inline]
+ fn bitxor(self, other: Self) -> Self::Output {
+ unsafe { Self(_mm_xor_si128(self.0, other.0)) }
+ }
+}
+
+impl BitXorAssign for Block {
+ #[inline]
+ fn bitxor_assign(&mut self, other: Self) {
+ unsafe { self.0 = _mm_xor_si128(self.0, other.0) }
+ }
+}
+
impl PartialEq for Block {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        unsafe {
            // SSE2-only path: the blocks are equal iff every byte compares
            // equal, i.e. the 16-bit movemask of the byte-wise comparison
            // has all 16 bits set.
            #[cfg(not(target_feature = "sse4.1"))]
            {
                _mm_movemask_epi8(_mm_cmpeq_epi8(self.0, other.0)) == 0xffff
            }
            // SSE4.1 path: equal iff the XOR of the operands is all zeros.
            #[cfg(target_feature = "sse4.1")]
            {
                let neq = _mm_xor_si128(self.0, other.0);
                _mm_test_all_zeros(neq, neq) == 1
            }
        }
    }
}
diff --git a/vendor/fixedbitset/src/block/wasm.rs b/vendor/fixedbitset/src/block/wasm.rs
new file mode 100644
index 00000000..f823d86a
--- /dev/null
+++ b/vendor/fixedbitset/src/block/wasm.rs
@@ -0,0 +1,80 @@
+use core::{
+ arch::wasm32::*,
+ ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not},
+};
+
/// A 128-bit bitset block backed by a WebAssembly `simd128` vector.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct Block(pub(super) v128);

impl Block {
    /// Returns `true` when every bit in the block is zero
    /// (`v128_any_true` reports whether any bit is set).
    #[inline]
    pub fn is_empty(self) -> bool {
        !v128_any_true(self.0)
    }

    /// Returns `self & !other`.
    ///
    /// Unlike the x86 `andnot` intrinsics, `v128_andnot(a, b)` computes
    /// `a & !b`, so the operands are passed in natural order here.
    #[inline]
    pub fn andnot(self, other: Self) -> Self {
        Self(v128_andnot(self.0, other.0))
    }
}
+
+impl Not for Block {
+ type Output = Block;
+ #[inline]
+ fn not(self) -> Self::Output {
+ Self(v128_xor(self.0, Self::ALL.0))
+ }
+}
+
+impl BitAnd for Block {
+ type Output = Block;
+ #[inline]
+ fn bitand(self, other: Self) -> Self::Output {
+ Self(v128_and(self.0, other.0))
+ }
+}
+
+impl BitAndAssign for Block {
+ #[inline]
+ fn bitand_assign(&mut self, other: Self) {
+ self.0 = v128_and(self.0, other.0);
+ }
+}
+
+impl BitOr for Block {
+ type Output = Block;
+ #[inline]
+ fn bitor(self, other: Self) -> Self::Output {
+ Self(v128_or(self.0, other.0))
+ }
+}
+
+impl BitOrAssign for Block {
+ #[inline]
+ fn bitor_assign(&mut self, other: Self) {
+ self.0 = v128_or(self.0, other.0);
+ }
+}
+
+impl BitXor for Block {
+ type Output = Block;
+ #[inline]
+ fn bitxor(self, other: Self) -> Self::Output {
+ Self(v128_xor(self.0, other.0))
+ }
+}
+
+impl BitXorAssign for Block {
+ #[inline]
+ fn bitxor_assign(&mut self, other: Self) {
+ self.0 = v128_xor(self.0, other.0)
+ }
+}
+
+impl PartialEq for Block {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ !v128_any_true(v128_xor(self.0, other.0))
+ }
+}