diff --git a/src/data_repr.rs b/src/data_repr.rs
index 6630f9ddf..7ab401177 100644
--- a/src/data_repr.rs
+++ b/src/data_repr.rs
@@ -1,10 +1,10 @@
+use crate::extension::nonnull;
+use alloc::borrow::ToOwned;
+use alloc::slice;
+use alloc::vec::Vec;
use std::mem;
use std::mem::ManuallyDrop;
use std::ptr::NonNull;
-use alloc::slice;
-use alloc::borrow::ToOwned;
-use alloc::vec::Vec;
-use crate::extension::nonnull;
use rawpointer::PointerExt;
@@ -30,11 +30,7 @@ impl<A> OwnedRepr<A> {
let len = v.len();
let capacity = v.capacity();
let ptr = nonnull::nonnull_from_vec_data(&mut v);
- Self {
- ptr,
- len,
- capacity,
- }
+ Self { ptr, len, capacity }
}
    pub(crate) fn into_vec(self) -> Vec<A> {
@@ -42,12 +38,12 @@ impl<A> OwnedRepr<A> {
}
pub(crate) fn as_slice(&self) -> &[A] {
- unsafe {
- slice::from_raw_parts(self.ptr.as_ptr(), self.len)
- }
+ unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
}
- pub(crate) fn len(&self) -> usize { self.len }
+ pub(crate) fn len(&self) -> usize {
+ self.len
+ }
pub(crate) fn as_ptr(&self) -> *const A {
self.ptr.as_ptr()
@@ -63,13 +59,11 @@ impl<A> OwnedRepr<A> {
/// Return end pointer
    pub(crate) fn as_end_nonnull(&self) -> NonNull<A> {
- unsafe {
- self.ptr.add(self.len)
- }
+ unsafe { self.ptr.add(self.len) }
}
/// Reserve `additional` elements; return the new pointer
- ///
+ ///
/// ## Safety
///
/// Note that existing pointers into the data are invalidated
@@ -82,6 +76,21 @@ impl<A> OwnedRepr<A> {
self.as_nonnull_mut()
}
+    /// Shrink the length and capacity of the data to `len` elements,
+    /// keeping the first `len` elements in place.
+    ///
+    /// The capacity is reduced as far as `Vec::shrink_to_fit` allows;
+    /// if `len` is greater than the current length, this is a no-op.
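+    ///
+    /// For example, a repr holding 120 elements of which only the first 18
+    /// are needed ends up with `len == 18` and a capacity as close to 18 as
+    /// the allocator allows.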
+    pub(crate) fn shrink_to_fit(&mut self, len: usize) {
+        if len <= self.len {
+            self.len = len;
+            // Rebuilding `self` from the shrunk `Vec` also refreshes `ptr`
+            // and `capacity`. `Vec::shrink_to_fit` makes no exact promise
+            // about the resulting capacity, so the capacity must not be set
+            // by hand here.
+            //
+            // Note that elements past `len` are forgotten, not dropped; the
+            // public caller restricts itself to `A: Copy` data.
+            self.modify_as_vec(|mut v| {
+                v.shrink_to_fit();
+                v
+            });
+        }
+    }
+
/// Set the valid length of the data
///
/// ## Safety
@@ -126,14 +135,13 @@ impl<A> OwnedRepr<A> {
let len = self.len;
self.len = 0;
self.capacity = 0;
- unsafe {
- Vec::from_raw_parts(self.ptr.as_ptr(), len, capacity)
- }
+ unsafe { Vec::from_raw_parts(self.ptr.as_ptr(), len, capacity) }
}
}
impl<A> Clone for OwnedRepr<A>
- where A: Clone
+where
+ A: Clone,
{
fn clone(&self) -> Self {
Self::from(self.as_slice().to_owned())
@@ -174,6 +182,5 @@ impl<A> Drop for OwnedRepr<A> {
}
}
-unsafe impl<A> Sync for OwnedRepr<A> where A: Sync { }
-unsafe impl<A> Send for OwnedRepr<A> where A: Send { }
-
+unsafe impl<A> Sync for OwnedRepr<A> where A: Sync {}
+unsafe impl<A> Send for OwnedRepr<A> where A: Send {}
diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs
index 8cfb82b55..b18a74142 100644
--- a/src/impl_owned_array.rs
+++ b/src/impl_owned_array.rs
@@ -1,4 +1,3 @@
-
use alloc::vec::Vec;
use std::mem;
use std::mem::MaybeUninit;
@@ -11,6 +10,7 @@ use crate::dimension;
use crate::error::{ErrorKind, ShapeError};
use crate::iterators::Baseiter;
use crate::low_level_util::AbortIfPanic;
+use crate::ArrayViewMut;
use crate::OwnedRepr;
use crate::Zip;
@@ -166,7 +166,8 @@ impl<A> Array<A, Ix2> {
}
impl<A, D> Array<A, D>
- where D: Dimension
+where
+ D: Dimension,
{
/// Move all elements from self into `new_array`, which must be of the same shape but
/// can have a different memory layout. The destination is overwritten completely.
@@ -198,9 +199,7 @@ impl<A, D> Array<A, D>
} else {
// If `A` doesn't need drop, we can overwrite the destination.
// Safe because: move_into_uninit only writes initialized values
- unsafe {
- self.move_into_uninit(new_array.into_maybe_uninit())
- }
+ unsafe { self.move_into_uninit(new_array.into_maybe_uninit()) }
}
}
@@ -209,7 +208,8 @@ impl<A, D> Array<A, D>
// Afterwards, `self` drops full of initialized values and dropping works as usual.
// This avoids moving out of owned values in `self` while at the same time managing
// the dropping if the values being overwritten in `new_array`.
- Zip::from(&mut self).and(new_array)
+ Zip::from(&mut self)
+ .and(new_array)
.for_each(|src, dst| mem::swap(src, dst));
}
@@ -401,16 +401,87 @@ impl<A, D> Array<A, D>
/// [0., 0., 0., 0.],
/// [1., 1., 1., 1.]]);
/// ```
-    pub fn push(&mut self, axis: Axis, array: ArrayView<A, D::Smaller>)
-        -> Result<(), ShapeError>
+    pub fn push(&mut self, axis: Axis, array: ArrayView<A, D::Smaller>) -> Result<(), ShapeError>
where
A: Clone,
D: RemoveAxis,
{
// same-dimensionality conversion
-        self.append(axis, array.insert_axis(axis).into_dimensionality::<D>().unwrap())
+        self.append(
+            axis,
+            array.insert_axis(axis).into_dimensionality::<D>().unwrap(),
+        )
+ }
+
+    /// Calculates the offset in elements from the start of the data buffer
+    /// (`self.data`) to the logical pointer `self.ptr`. Returns 0 for
+    /// zero-sized `A`, where pointer distances are not meaningful.
+    unsafe fn offset_from_data_ptr_to_logical_ptr(&self) -> isize {
+        if std::mem::size_of::<A>() != 0 {
+            self.as_ptr().offset_from(self.data.as_ptr())
+        } else {
+            0
+        }
}
+    /// Shrinks the capacity of the array as much as possible.
+    ///
+    /// The elements are compacted in memory while preserving the relative
+    /// order of the axis strides, and excess capacity is released.
+    ///
+ /// ```
+ /// use ndarray::array;
+ /// use ndarray::s;
+ ///
+    /// let a = array![[0, 1, 2], [3, 4, 5], [6, 7, 8]];
+ /// let mut a = a.slice_move(s![.., 0..2]);
+ /// let b = a.clone();
+ /// a.shrink_to_fit();
+ /// assert_eq!(a, b);
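+    /// // The relative order of the axis strides is preserved: the slice
+    /// // has strides [3, 1], so the shrunk array is laid out with
+    /// // strides [2, 1].
+    /// assert_eq!(a.strides(), &[2, 1]);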
+ /// ```
+ pub fn shrink_to_fit(&mut self)
+ where
+ A: Copy,
+ {
+ let dim = self.dim.clone();
+ let strides = self.strides.clone();
+        let mut shrunk_strides = D::zeros(dim.ndim());
+
+        // Calculate the new strides after the shrink.
+        // The relative order of the stride magnitudes is preserved, so the
+        // shrunk array keeps the memory order of the original. For example,
+        // if `dim` is [3, 2, 3] and the strides are [1, 12, 3], the default
+        // (C-order) strides would be [6, 3, 1], but the shrunk strides are
+        // [1, 9, 3], keeping the original stride order. (Negative strides
+        // compare as large unsigned values here, so those axes receive the
+        // largest shrunk strides.)
+        let mut stride_order = (0..dim.ndim()).collect::<Vec<usize>>();
+        stride_order.sort_unstable_by(|&i, &j| strides[i].cmp(&strides[j]));
+        let mut stride = 1;
+        for &axis in stride_order.iter() {
+            shrunk_strides[axis] = stride;
+            stride *= dim[axis];
+        }
+
+        // Recover the logical index corresponding to a linear offset in the
+        // shrunk layout, dividing out the shrunk strides from the largest
+        // to the smallest, i.e. visiting the axes in reverse `stride_order`.
+        let offset_to_index = |mut offset: usize| {
+            let mut index = D::zeros(dim.ndim());
+            for &axis in stride_order.iter().rev() {
+                index[axis] = offset / shrunk_strides[axis];
+                offset %= shrunk_strides[axis];
+            }
+            index
+        };
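+        // E.g. with `dim` [3, 2, 3] and shrunk strides [1, 9, 3] as in the
+        // comment above, offset 7 divides out as 0 * 9 + 2 * 3 + 1 * 1,
+        // i.e. the logical index [1, 0, 2].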
+
+        // Gather the elements into their new positions. This goes through a
+        // temporary buffer: with negative strides the source and destination
+        // regions can overlap, so a direct in-place copy could read elements
+        // that have already been overwritten.
+        let ptr_offset = unsafe { self.offset_from_data_ptr_to_logical_ptr() };
+        self.ptr = unsafe { self.ptr.sub(ptr_offset as usize) };
+        let len = self.len();
+        let mut gathered = Vec::with_capacity(len);
+        for offset in 0..len {
+            let index = offset_to_index(offset);
+            let old_offset = dim.stride_offset_checked(&strides, &index).unwrap();
+            unsafe {
+                gathered.push(*self.ptr.as_ptr().add((old_offset + ptr_offset) as usize));
+            }
+        }
+        for (offset, elt) in gathered.into_iter().enumerate() {
+            unsafe {
+                *self.ptr.as_ptr().add(offset) = elt;
+            }
+        }
+        self.strides = shrunk_strides;
+        // Shrinking the backing data may reallocate (and move) the buffer,
+        // so the logical pointer has to be refreshed afterwards.
+        self.data.shrink_to_fit(len);
+        self.ptr = self.data.as_nonnull_mut();
+    }
/// Append an array to the array along an axis.
///
@@ -462,8 +533,7 @@ impl<A, D> Array<A, D>
/// [1., 1., 1., 1.],
/// [1., 1., 1., 1.]]);
/// ```
-    pub fn append(&mut self, axis: Axis, mut array: ArrayView<A, D>)
-        -> Result<(), ShapeError>
+    pub fn append(&mut self, axis: Axis, mut array: ArrayView<A, D>) -> Result<(), ShapeError>
where
A: Clone,
D: RemoveAxis,
@@ -556,7 +626,11 @@ impl<A, D> Array<A, D>
acc
} else {
let this_ax = ax.len as isize * ax.stride.abs();
- if this_ax > acc { this_ax } else { acc }
+ if this_ax > acc {
+ this_ax
+ } else {
+ acc
+ }
}
});
let mut strides = self.strides.clone();
@@ -574,7 +648,10 @@ impl<A, D> Array<A, D>
0
};
debug_assert!(data_to_array_offset >= 0);
- self.ptr = self.data.reserve(len_to_append).offset(data_to_array_offset);
+ self.ptr = self
+ .data
+ .reserve(len_to_append)
+ .offset(data_to_array_offset);
// clone elements from view to the array now
//
@@ -608,10 +685,13 @@ impl<A, D> Array<A, D>
if tail_view.ndim() > 1 {
sort_axes_in_default_order_tandem(&mut tail_view, &mut array);
- debug_assert!(tail_view.is_standard_layout(),
- "not std layout dim: {:?}, strides: {:?}",
- tail_view.shape(), tail_view.strides());
- }
+ debug_assert!(
+ tail_view.is_standard_layout(),
+ "not std layout dim: {:?}, strides: {:?}",
+ tail_view.shape(),
+ tail_view.strides()
+ );
+ }
// Keep track of currently filled length of `self.data` and update it
// on scope exit (panic or loop finish). This "indirect" way to
@@ -635,7 +715,6 @@ impl<A, D> Array<A, D>
data: &mut self.data,
};
-
// Safety: tail_view is constructed to have the same shape as array
Zip::from(tail_view)
.and_unchecked(array)
@@ -665,8 +744,11 @@ impl Array
///
/// This is an internal function for use by move_into and IntoIter only, safety invariants may need
/// to be upheld across the calls from those implementations.
-pub(crate) unsafe fn drop_unreachable_raw<A, D>(mut self_: RawArrayViewMut<A, D>, data_ptr: *mut A, data_len: usize)
-where
+pub(crate) unsafe fn drop_unreachable_raw<A, D>(
+    mut self_: RawArrayViewMut<A, D>,
+    data_ptr: *mut A,
+    data_len: usize,
+) where
D: Dimension,
{
let self_len = self_.len();
@@ -731,8 +813,11 @@ where
dropped_elements += 1;
}
- assert_eq!(data_len, dropped_elements + self_len,
- "Internal error: inconsistency in move_into");
+ assert_eq!(
+ data_len,
+ dropped_elements + self_len,
+ "Internal error: inconsistency in move_into"
+ );
}
/// Sort axes to standard order, i.e Axis(0) has biggest stride and Axis(n - 1) least stride
@@ -774,7 +859,6 @@ where
}
}
-
/// Sort axes to standard order, i.e Axis(0) has biggest stride and Axis(n - 1) least stride
///
/// Axes in a and b are sorted by the strides of `a`, and `a`'s axes should have stride >= 0 before
diff --git a/tests/shrink_to_fit.rs b/tests/shrink_to_fit.rs
new file mode 100644
index 000000000..fd5768202
--- /dev/null
+++ b/tests/shrink_to_fit.rs
@@ -0,0 +1,55 @@
+use ndarray::{s, Array};
+
+#[test]
+fn dim_0() {
+    let raw_vec: Vec<_> = (0..4 * 5 * 6).collect();
+ let a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
+ let mut a_slice = a.slice_move(s![0..2, .., ..]);
+ let a_slice_clone = a_slice.view().to_owned();
+ a_slice.shrink_to_fit();
+ assert_eq!(a_slice, a_slice_clone);
+}
+
+#[test]
+fn swap_axis_dim_0() {
+    let raw_vec: Vec<_> = (0..4 * 5 * 6).collect();
+ let mut a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
+ a.swap_axes(0, 1);
+ let mut a_slice = a.slice_move(s![2..3, .., ..]);
+ let a_slice_clone = a_slice.view().to_owned();
+ a_slice.shrink_to_fit();
+ assert_eq!(a_slice, a_slice_clone);
+}
+
+#[test]
+fn swap_axis_dim() {
+    let raw_vec: Vec<_> = (0..4 * 5 * 6).collect();
+ let mut a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
+ a.swap_axes(2, 1);
+ let mut a_slice = a.slice_move(s![2..3, 0..3, 0..;2]);
+ let a_slice_clone = a_slice.view().to_owned();
+ a_slice.shrink_to_fit();
+ assert_eq!(a_slice, a_slice_clone);
+}
+
+#[test]
+fn stride_negative() {
+    let raw_vec: Vec<_> = (0..4 * 5 * 6).collect();
+ let a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
+ let mut a_slice = a.slice_move(s![2..3, 0..3, 0..;-1]);
+ let a_slice_clone = a_slice.view().to_owned();
+ a_slice.shrink_to_fit();
+ assert_eq!(a_slice, a_slice_clone);
+}
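+
+// A full reversal makes the source and destination regions of the element
+// copy overlap, so this exercises the temporary-buffer gather inside
+// `shrink_to_fit` (a case the sliced tests above do not hit).
+#[test]
+fn reverse_1d() {
+    let raw_vec: Vec<_> = (0..10).collect();
+    let a = Array::from_shape_vec(10, raw_vec).unwrap();
+    let mut a_rev = a.slice_move(s![..;-1]);
+    let a_rev_clone = a_rev.view().to_owned();
+    a_rev.shrink_to_fit();
+    assert_eq!(a_rev, a_rev_clone);
+}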