feat(gpu): add gpu array type in hl api
Commit 86505a1 (1 parent: e363b76)
Showing 10 changed files with 1,123 additions and 0 deletions.
@@ -0,0 +1,300 @@
//! This module contains the implementation of the FheBool array backend
//! where the values and computations are always done on the GPU.
use super::super::helpers::{create_sub_mut_slice_with_bound, create_sub_slice_with_bound};
use super::super::traits::{BitwiseArrayBackend, ClearBitwiseArrayBackend};
use crate::array::stride::{ParStridedIter, ParStridedIterMut, StridedIter};
use crate::array::traits::TensorSlice;
use crate::high_level_api::array::{ArrayBackend, BackendDataContainer, BackendDataContainerMut};
use crate::high_level_api::global_state;
use crate::high_level_api::global_state::with_thread_local_cuda_streams;
use crate::integer::gpu::ciphertext::boolean_value::CudaBooleanBlock;
use crate::prelude::{FheDecrypt, FheTryEncrypt};
use crate::{ClientKey, FheBoolId};
use rayon::prelude::*;
use std::ops::RangeBounds;

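/// Marker type selecting the GPU backend for `FheBool` arrays; the stored
/// elements are `CudaBooleanBlock`s, i.e. encrypted booleans kept in GPU memory.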
pub struct GpuFheBoolArrayBackend;

pub type GpuFheBoolArray = super::super::FheBackendArray<GpuFheBoolArrayBackend, FheBoolId>;
pub type GpuFheBoolSlice<'a> =
    super::super::FheBackendArraySlice<'a, GpuFheBoolArrayBackend, FheBoolId>;
pub type GpuFheBoolSliceMut<'a> =
    super::super::FheBackendArraySliceMut<'a, GpuFheBoolArrayBackend, FheBoolId>;

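// Data containers used by the backend: borrowed, mutably borrowed and owned
// collections of `CudaBooleanBlock`s (the per-element GPU ciphertexts).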
pub struct GpuBooleanSlice<'a>(pub(crate) &'a [CudaBooleanBlock]);
pub struct GpuBooleanSliceMut<'a>(pub(crate) &'a mut [CudaBooleanBlock]);
pub struct GpuBooleanOwned(pub(crate) Vec<CudaBooleanBlock>);

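// Cloning duplicates every block on the device through the thread-local CUDA streams.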
impl Clone for GpuBooleanOwned {
    fn clone(&self) -> Self {
        with_thread_local_cuda_streams(|streams| {
            Self(self.0.iter().map(|elem| elem.duplicate(streams)).collect())
        })
    }
}

impl ArrayBackend for GpuFheBoolArrayBackend {
    type Slice<'a>
        = GpuBooleanSlice<'a>
    where
        Self: 'a;
    type SliceMut<'a>
        = GpuBooleanSliceMut<'a>
    where
        Self: 'a;
    type Owned = GpuBooleanOwned;
}

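// Strided (sequential and rayon-parallel) iteration over the blocks of a tensor
// slice, driven by the dimensions stored in the `TensorSlice`.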
impl<'a> TensorSlice<'a, GpuBooleanSlice<'a>> {
    pub fn iter(self) -> StridedIter<'a, CudaBooleanBlock> {
        StridedIter::new(self.slice.0, self.dims.clone())
    }

    pub fn par_iter(self) -> ParStridedIter<'a, CudaBooleanBlock> {
        ParStridedIter::new(self.slice.0, self.dims.clone())
    }
}

impl<'a> TensorSlice<'a, GpuBooleanSliceMut<'a>> {
    pub fn par_iter_mut(self) -> ParStridedIterMut<'a, CudaBooleanBlock> {
        ParStridedIterMut::new(self.slice.0, self.dims.clone())
    }
}

impl From<Vec<CudaBooleanBlock>> for GpuBooleanOwned {
    fn from(value: Vec<CudaBooleanBlock>) -> Self {
        Self(value)
    }
}

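// `BackendDataContainer` gives uniform access (length, sub-slicing, conversion to
// the owned form) over the three containers; turning a borrowed slice into an owned
// array duplicates its blocks on the GPU.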
impl<'a> BackendDataContainer for GpuBooleanSlice<'a> {
    type Backend = GpuFheBoolArrayBackend;

    fn len(&self) -> usize {
        <[CudaBooleanBlock]>::len(self.0)
    }

    fn as_sub_slice(
        &self,
        range: impl RangeBounds<usize>,
    ) -> <Self::Backend as ArrayBackend>::Slice<'_> {
        GpuBooleanSlice(create_sub_slice_with_bound(self.0, range))
    }

    fn into_owned(self) -> <Self::Backend as ArrayBackend>::Owned {
        with_thread_local_cuda_streams(|streams| {
            GpuBooleanOwned(self.0.iter().map(|elem| elem.duplicate(streams)).collect())
        })
    }
}

impl<'a> BackendDataContainer for GpuBooleanSliceMut<'a> {
    type Backend = GpuFheBoolArrayBackend;

    fn len(&self) -> usize {
        <[CudaBooleanBlock]>::len(self.0)
    }

    fn as_sub_slice(
        &self,
        range: impl RangeBounds<usize>,
    ) -> <Self::Backend as ArrayBackend>::Slice<'_> {
        GpuBooleanSlice(create_sub_slice_with_bound(self.0, range))
    }

    fn into_owned(self) -> <Self::Backend as ArrayBackend>::Owned {
        with_thread_local_cuda_streams(|streams| {
            GpuBooleanOwned(self.0.iter().map(|elem| elem.duplicate(streams)).collect())
        })
    }
}

impl<'a> BackendDataContainerMut for GpuBooleanSliceMut<'a> {
    fn as_sub_slice_mut(
        &mut self,
        range: impl RangeBounds<usize>,
    ) -> <Self::Backend as ArrayBackend>::SliceMut<'_> {
        GpuBooleanSliceMut(create_sub_mut_slice_with_bound(self.0, range))
    }
}

impl BackendDataContainer for GpuBooleanOwned {
    type Backend = GpuFheBoolArrayBackend;

    fn len(&self) -> usize {
        self.0.len()
    }

    fn as_sub_slice(
        &self,
        range: impl RangeBounds<usize>,
    ) -> <Self::Backend as ArrayBackend>::Slice<'_> {
        GpuBooleanSlice(create_sub_slice_with_bound(self.0.as_slice(), range))
    }

    fn into_owned(self) -> <Self::Backend as ArrayBackend>::Owned {
        self
    }
}

impl BackendDataContainerMut for GpuBooleanOwned {
    fn as_sub_slice_mut(
        &mut self,
        range: impl RangeBounds<usize>,
    ) -> <Self::Backend as ArrayBackend>::SliceMut<'_> {
        GpuBooleanSliceMut(create_sub_mut_slice_with_bound(
            self.0.as_mut_slice(),
            range,
        ))
    }
}

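// Ciphertext-ciphertext bitwise operations: block pairs are traversed in parallel
// with rayon, and each operation is evaluated on the GPU through the internal CUDA
// keys from the global state and the thread-local CUDA streams.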
impl BitwiseArrayBackend for GpuFheBoolArrayBackend {
    fn bitand<'a>(
        lhs: TensorSlice<'_, Self::Slice<'a>>,
        rhs: TensorSlice<'_, Self::Slice<'a>>,
    ) -> Self::Owned {
        GpuBooleanOwned(global_state::with_cuda_internal_keys(|cuda_key| {
            with_thread_local_cuda_streams(|streams| {
                lhs.par_iter()
                    .zip(rhs.par_iter())
                    .map(|(lhs, rhs)| CudaBooleanBlock(cuda_key.bitand(&lhs.0, &rhs.0, streams)))
                    .collect::<Vec<_>>()
            })
        }))
    }

    fn bitor<'a>(
        lhs: TensorSlice<'_, Self::Slice<'a>>,
        rhs: TensorSlice<'_, Self::Slice<'a>>,
    ) -> Self::Owned {
        GpuBooleanOwned(global_state::with_cuda_internal_keys(|cuda_key| {
            with_thread_local_cuda_streams(|streams| {
                lhs.par_iter()
                    .zip(rhs.par_iter())
                    .map(|(lhs, rhs)| CudaBooleanBlock(cuda_key.bitor(&lhs.0, &rhs.0, streams)))
                    .collect::<Vec<_>>()
            })
        }))
    }

    fn bitxor<'a>(
        lhs: TensorSlice<'_, Self::Slice<'a>>,
        rhs: TensorSlice<'_, Self::Slice<'a>>,
    ) -> Self::Owned {
        GpuBooleanOwned(global_state::with_cuda_internal_keys(|cuda_key| {
            with_thread_local_cuda_streams(|streams| {
                lhs.par_iter()
                    .zip(rhs.par_iter())
                    .map(|(lhs, rhs)| CudaBooleanBlock(cuda_key.bitxor(&lhs.0, &rhs.0, streams)))
                    .collect::<Vec<_>>()
            })
        }))
    }

    fn bitnot(lhs: TensorSlice<'_, Self::Slice<'_>>) -> Self::Owned {
        GpuBooleanOwned(global_state::with_cuda_internal_keys(|cuda_key| {
            with_thread_local_cuda_streams(|streams| {
                lhs.par_iter()
                    .map(|lhs| CudaBooleanBlock(cuda_key.bitnot(&lhs.0, streams)))
                    .collect::<Vec<_>>()
            })
        }))
    }
}

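// Ciphertext-plaintext bitwise operations: each clear `bool` is passed to the
// scalar variants of the GPU operations as a `u8`.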
impl ClearBitwiseArrayBackend<bool> for GpuFheBoolArrayBackend {
    fn bitand_slice(
        lhs: TensorSlice<'_, Self::Slice<'_>>,
        rhs: TensorSlice<'_, &'_ [bool]>,
    ) -> Self::Owned {
        GpuBooleanOwned(global_state::with_cuda_internal_keys(|cuda_key| {
            with_thread_local_cuda_streams(|streams| {
                lhs.par_iter()
                    .zip(rhs.par_iter().copied())
                    .map(|(lhs, rhs)| {
                        CudaBooleanBlock(cuda_key.scalar_bitand(&lhs.0, rhs as u8, streams))
                    })
                    .collect::<Vec<_>>()
            })
        }))
    }

    fn bitor_slice(
        lhs: TensorSlice<'_, Self::Slice<'_>>,
        rhs: TensorSlice<'_, &'_ [bool]>,
    ) -> Self::Owned {
        GpuBooleanOwned(global_state::with_cuda_internal_keys(|cuda_key| {
            with_thread_local_cuda_streams(|streams| {
                lhs.par_iter()
                    .zip(rhs.par_iter().copied())
                    .map(|(lhs, rhs)| {
                        CudaBooleanBlock(cuda_key.scalar_bitor(&lhs.0, rhs as u8, streams))
                    })
                    .collect::<Vec<_>>()
            })
        }))
    }

    fn bitxor_slice(
        lhs: TensorSlice<'_, Self::Slice<'_>>,
        rhs: TensorSlice<'_, &'_ [bool]>,
    ) -> Self::Owned {
        GpuBooleanOwned(global_state::with_cuda_internal_keys(|cuda_key| {
            with_thread_local_cuda_streams(|streams| {
                lhs.par_iter()
                    .zip(rhs.par_iter().copied())
                    .map(|(lhs, rhs)| {
                        CudaBooleanBlock(cuda_key.scalar_bitxor(&lhs.0, rhs as u8, streams))
                    })
                    .collect::<Vec<_>>()
            })
        }))
    }
}

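// Encryption happens on the CPU with the client key; each resulting boolean block
// is then uploaded to the GPU.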
impl FheTryEncrypt<&[bool], ClientKey> for GpuFheBoolArray {
    type Error = crate::Error;

    fn try_encrypt(values: &[bool], cks: &ClientKey) -> Result<Self, Self::Error> {
        let encrypted = with_thread_local_cuda_streams(|streams| {
            values
                .iter()
                .copied()
                .map(|value| {
                    CudaBooleanBlock::from_boolean_block(&cks.key.key.encrypt_bool(value), streams)
                })
                .collect::<Vec<_>>()
        });
        Ok(Self::new(encrypted, vec![values.len()]))
    }
}

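// Decryption downloads each block from the GPU back to a CPU boolean block before
// decrypting it with the client key.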
impl<'a> FheDecrypt<Vec<bool>> for GpuFheBoolSlice<'a> {
    fn decrypt(&self, key: &ClientKey) -> Vec<bool> {
        with_thread_local_cuda_streams(|streams| {
            self.elems
                .0
                .iter()
                .map(|encrypted_value| {
                    key.key
                        .key
                        .decrypt_bool(&encrypted_value.to_boolean_block(streams))
                })
                .collect()
        })
    }
}

impl<'a> FheDecrypt<Vec<bool>> for GpuFheBoolSliceMut<'a> {
    fn decrypt(&self, key: &ClientKey) -> Vec<bool> {
        self.as_slice().decrypt(key)
    }
}

impl FheDecrypt<Vec<bool>> for GpuFheBoolArray {
    fn decrypt(&self, key: &ClientKey) -> Vec<bool> {
        self.as_slice().decrypt(key)
    }
}
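Rough usage sketch (not part of this commit): a round-trip that encrypts a clear `Vec<bool>` into a `GpuFheBoolArray` and decrypts it again. The import path and the assumption that the CUDA server key has already been installed in the global state are mine, not taken from this diff.

```rust
// Hypothetical round-trip sketch. Assumes `GpuFheBoolArray` is reachable at this
// (assumed) module path and that the CUDA server key was set up by the caller.
use crate::high_level_api::array::gpu::booleans::GpuFheBoolArray;
use crate::prelude::{FheDecrypt, FheTryEncrypt};
use crate::ClientKey;

fn gpu_bool_array_roundtrip(cks: &ClientKey) {
    let clear = vec![true, false, true, true];

    // Encrypts on the CPU with the client key, then moves each block to the GPU.
    let encrypted =
        GpuFheBoolArray::try_encrypt(clear.as_slice(), cks).expect("encryption failed");

    // Copies the blocks back from the GPU and decrypts them with the client key.
    let decrypted: Vec<bool> = encrypted.decrypt(cks);
    assert_eq!(decrypted, clear);
}
```

Bitwise operations on such arrays would then, presumably, be dispatched by the shared `FheBackendArray` machinery to the `BitwiseArrayBackend` and `ClearBitwiseArrayBackend` impls added above.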