diff --git a/.all-contributorsrc b/.all-contributorsrc
index 678e7dc2..1179f316 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -158,6 +158,16 @@
"contributions": [
"doc"
]
+ },
+ {
+ "login": "amangoel185",
+ "name": "Aman Goel",
+ "avatar_url": "https://avatars.githubusercontent.com/u/10528392?v=4",
+ "profile": "http://amangoel.me",
+ "contributions": [
+ "doc",
+ "code"
+ ]
}
],
"contributorsPerLine": 7,
diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml
index f42b3e36..b0c7bb21 100644
--- a/.github/workflows/examples.yml
+++ b/.github/workflows/examples.yml
@@ -20,7 +20,7 @@ jobs:
- uses: actions/setup-python@v1
with:
python-version: '3.8'
- - name: Pre-install Numpy
+ - name: Pre-install NumPy
run: python -m pip install -r dev-requirements.txt nbconvert ipykernel
- name: Install kernel
run: python -m ipykernel install --user --name boost-hist
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index fbc6d417..a5eb70f2 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -49,11 +49,10 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: ["3.6", "3.9", "pypy3"]
+ python-version: ["3.6", "3.9", "3.10-dev", "pypy3"]
include:
- python-version: "3.8"
cmake-extras: "-DCMAKE_CXX_STANDARD=17"
- - python-version: "3.10-dev"
name: CMake Python ${{ matrix.python-version }}
@@ -83,6 +82,7 @@ jobs:
name: Wheels on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
+ fail-fast: false
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
@@ -91,11 +91,10 @@ jobs:
with:
submodules: true
- - uses: pypa/cibuildwheel@v2.0.0a4
+ - uses: pypa/cibuildwheel@v2.1.2
env:
CIBW_BUILD: cp38-win_amd64 cp310-manylinux_i686 cp37-macosx_x86_64
CIBW_BUILD_VERBOSITY: 1
- CIBW_PRERELEASE_PYTHONS: 1
- uses: actions/upload-artifact@v2
with:
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index a9bfcc88..543ec20e 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -17,9 +17,13 @@ on:
types:
- published
+concurrency:
+ group: wheels-${{ github.head_ref }}
+ cancel-in-progress: true
+
env:
SETUPTOOLS_SCM_PRETEND_VERSION: ${{ github.event.inputs.overrideVersion }}
- CIBW_ENVIRONMENT: "PIP_ONLY_BINARY=numpy SETUPTOOLS_SCM_PRETEND_VERSION=${{ github.event.inputs.overrideVersion }}"
+ CIBW_ENVIRONMENT: "PIP_PREFER_BINARY=1 SETUPTOOLS_SCM_PRETEND_VERSION=${{ github.event.inputs.overrideVersion }}"
jobs:
build_sdist:
@@ -34,7 +38,7 @@ jobs:
run: pipx run build --sdist
- name: Check metadata
- run: pipx run twine check dist/*
+ run: pipx run twine check --strict dist/*
- uses: actions/upload-artifact@v2
with:
@@ -46,7 +50,7 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
- python: [36, 37, 38, 39]
+ python: [36, 37, 38, 39, 310]
arch: [aarch64]
steps:
@@ -58,7 +62,7 @@ jobs:
with:
platforms: all
- - uses: pypa/cibuildwheel@v2.0.0a4
+ - uses: pypa/cibuildwheel@v2.1.2
env:
CIBW_BUILD: cp${{ matrix.python }}-*
CIBW_ARCHS: ${{ matrix.arch }}
@@ -79,14 +83,16 @@ jobs:
strategy:
fail-fast: false
matrix:
- os: [windows-latest, macos-latest]
+ os: [windows-latest, macos-latest, ubuntu-latest]
arch: [auto64]
- build: ["*"]
+ build: ["?p3?-* *macos* *win*"]
+ CIBW_MANYLINUX_X86_64_IMAGE: [manylinux2010]
+ CIBW_MANYLINUX_I686_IMAGE: [manylinux2010]
include:
- os: ubuntu-latest
- arch: auto
- build: "pp*"
+ arch: auto32
+ build: "?p3?-*"
CIBW_MANYLINUX_X86_64_IMAGE: manylinux2010
CIBW_MANYLINUX_I686_IMAGE: manylinux2010
@@ -97,6 +103,13 @@ jobs:
CIBW_MANYLINUX_X86_64_IMAGE: skhep/manylinuxgcc-x86_64
CIBW_MANYLINUX_I686_IMAGE: skhep/manylinuxgcc-i686
+ - os: ubuntu-latest
+ type: ManyLinux2014
+ arch: auto
+ build: "cp310-*"
+ CIBW_MANYLINUX_X86_64_IMAGE: manylinux2014
+ CIBW_MANYLINUX_I686_IMAGE: manylinux2014
+
- os: macos-latest
arch: universal2
build: "*"
@@ -110,7 +123,7 @@ jobs:
with:
submodules: true
- - uses: pypa/cibuildwheel@v2.0.0a4
+ - uses: pypa/cibuildwheel@v2.1.2
env:
CIBW_BUILD: ${{ matrix.build }}
CIBW_MANYLINUX_I686_IMAGE: ${{ matrix.CIBW_MANYLINUX_I686_IMAGE }}
diff --git a/.gitignore b/.gitignore
index cd34cf7d..b1769507 100644
--- a/.gitignore
+++ b/.gitignore
@@ -34,6 +34,7 @@
# Build directories
/*build*
/docs/_build/*
+/examples/*/*build*
/pip-wheel-metadata/*
# Outputs
@@ -56,6 +57,7 @@
*.swp
*~
*.clangd
+/.cache/*
compile_commands.json
# Venvs such as /.env
@@ -68,12 +70,14 @@ compile_commands.json
/node_modules/*
/yarn.lock
/package.json
+/package-lock.json
# Testing
/.benchmarks/*
+/.hypothesis/*
# PyCharm
-.idea
+/.idea/*
-# All-contributors
-package-lock.json
+# VSCode
+/.vscode/*
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bd1f66a4..a830f23d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -4,26 +4,9 @@ ci:
repos:
- repo: https://github.com/psf/black
- rev: 21.6b0
+ rev: 21.8b0
hooks:
- - id: black
- types: [file]
- types_or: [python, pyi]
-
-- repo: https://github.com/nbQA-dev/nbQA
- rev: 0.13.1
- hooks:
- - id: nbqa-black
- additional_dependencies: [black==20.8b1]
- args: ["--nbqa-mutate"]
-
-# - repo: https://github.com/asottile/blacken-docs
-# rev: v1.10.0
-# hooks:
-# - id: blacken-docs
-# additional_dependencies: [black==20.8b1]
-# args: ["--target-version=py36"]
-# stages: ["manual"]
+ - id: black-jupyter
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.0.1
@@ -46,13 +29,13 @@ repos:
additional_dependencies: [pyyaml]
- repo: https://github.com/asottile/pyupgrade
- rev: v2.20.0
+ rev: v2.26.0
hooks:
- id: pyupgrade
args: [--py36-plus]
- repo: https://github.com/PyCQA/isort
- rev: 5.9.1
+ rev: 5.9.3
hooks:
- id: isort
@@ -60,6 +43,7 @@ repos:
rev: v1.17.0
hooks:
- id: setup-cfg-fmt
+ args: [--max-py-version=3.10]
- repo: https://github.com/pycqa/flake8
rev: 3.9.2
@@ -74,7 +58,7 @@ repos:
- id: mypy
files: src
args: []
- additional_dependencies: [numpy==1.20.*, uhi, types-dataclasses]
+ additional_dependencies: [numpy==1.21.*, uhi, types-dataclasses]
- repo: https://github.com/mgedmin/check-manifest
rev: "0.46"
@@ -82,13 +66,20 @@ repos:
- id: check-manifest
stages: [manual]
+- repo: https://github.com/codespell-project/codespell
+ rev: v2.1.0
+ hooks:
+ - id: codespell
+ args: ["-L", "hist,nd,circularly,ba"]
+ exclude: ^(notebooks/xarray.ipynb|notebooks/BoostHistogramHandsOn.ipynb)$
+
- repo: local
hooks:
- - id: disallow-pybind
- name: Disallow PyBind (dual caps)
+ - id: disallow-caps
+ name: Disallow improper capitalization
language: pygrep
- entry: PyBind
- exclude: .pre-commit.*.yaml
+ entry: PyBind|Numpy|Cmake|CCache|Github|PyTest
+ exclude: .pre-commit-config.yaml
- repo: local
hooks:
diff --git a/README.md b/README.md
index c761569b..aeda83f5 100644
--- a/README.md
+++ b/README.md
@@ -48,7 +48,7 @@ hist.fill(
[0.1, 0.4, 0.9],
)
-# Numpy array view into histogram counts, no overflow bins
+# NumPy array view into histogram counts, no overflow bins
values = hist.values()
# Make a new histogram with just the second axis, summing over the first, and
@@ -142,7 +142,7 @@ histograms can be plotted via any compatible library, such as [mplhep][].
* `v = h[{0:b}]`: All actions can be represented by `axis:item` dictionary instead of by position (mostly useful for slicing)
* Slicing to get histogram or set array of values
* `h2 = h[a:b]`: Access a slice of a histogram, cut portions go to flow bins if present
- * `h2 = h[:, ...]`: Using `:` and `...` supported just like Numpy
+ * `h2 = h[:, ...]`: Using `:` and `...` supported just like NumPy
* `h2 = h[::sum]`: Third item in slice is the "action"
* `h[...] = array`: Set the bin contents, either include or omit flow bins
* Special accessors
@@ -186,12 +186,12 @@ platforms have wheels provided in boost-histogram:
| System | Arch | Python versions | PyPy versions |
|---------|-----|------------------|--------------|
-| ManyLinux1 (custom GCC 9.2) | 32 & 64-bit | 3.7, 3.8 | |
-| ManyLinux2010 | 32 & 64-bit | 3.6, 3.7, 3.8, 3.9 | (64-bit) 7.3: 3.7 |
-| ManyLinux2014 | ARM64 | 3.6, 3.7, 3.8, 3.9 | |
-| macOS 10.9+ | 64-bit | 3.6, 3.7, 3.8, 3.9 | 7.3: 3.7 |
-| macOS Universal2 | Arm64 | 3.8, 3.9 | |
-| Windows | 32 & 64-bit | 3.6, 3.7, 3.8, 3.9 | (64-bit) 7.3: 3.7 |
+| ManyLinux1 (custom GCC 9.2) | 32 & 64-bit | 3.6, 3.7, 3.8 | |
+| ManyLinux2010 | 32 & 64-bit | 3.6, 3.7, 3.8, 3.9, 3.10 | (64-bit) 7.3: 3.7 |
+| ManyLinux2014 | ARM64 | 3.6, 3.7, 3.8, 3.9, 3.10 | |
+| macOS 10.9+ | 64-bit | 3.6, 3.7, 3.8, 3.9, 3.10 | 7.3: 3.7 |
+| macOS Universal2 | Arm64 | 3.8, 3.9, 3.10 | |
+| Windows | 32 & 64-bit | 3.6, 3.7, 3.8, 3.9, 3.10 | (64-bit) 7.3: 3.7 |
* manylinux1: Built using a custom docker container with GCC 9. Anything running Python 3.9 should be compatible with manylinux2010, so manylinux1 is not provided for Python 3.9 (like NumPy).
@@ -255,6 +255,7 @@ We would like to acknowledge the contributors that made this project possible ([
Konstantin Gizdov ๐ฆ ๐ |
Kyle Cranmer ๐ |
+ Aman Goel 📖 💻 |
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 8ec040cd..ce16ad9f 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -1,7 +1,40 @@
# What's new in boost-histogram
+## Version 1.2
+
+### Version 1.2.0
+
+#### User changes
+* Python 3.10 officially supported, with wheels.
+* Support subtraction on histograms [#636][]
+* Integer histograms are now signed [#636][]
+
+#### Bug fixes
+* Support custom setters on AxesTuple subclasses. [#627][]
+* Faster picking, whether slices are also used [#648][] or not [#645][] (1000x or more in some cases)
+* Throw an error when an AxesTuple setter receives the wrong length (inspired by zip's strict mode in Python 3.10) [#627][]
+* Fix error thrown on comparison with axis and non-axis object [#631][]
+* Static typing no longer thinks `storage=` is required [#604][]
+
+#### Developer changes
+* Support NumPy 1.21 for static type checking [#625][]
+* Use newer Boost 1.77 and Boost.Histogram 1.77+1 [#594][]
+* Provide nox support [#647][]
+
+[#594]: https://github.com/scikit-hep/boost-histogram/pull/594
+[#604]: https://github.com/scikit-hep/boost-histogram/pull/604
+[#625]: https://github.com/scikit-hep/boost-histogram/pull/625
+[#627]: https://github.com/scikit-hep/boost-histogram/pull/627
+[#631]: https://github.com/scikit-hep/boost-histogram/pull/631
+[#636]: https://github.com/scikit-hep/boost-histogram/pull/636
+[#645]: https://github.com/scikit-hep/boost-histogram/pull/645
+[#647]: https://github.com/scikit-hep/boost-histogram/pull/647
+[#648]: https://github.com/scikit-hep/boost-histogram/pull/648
+
## Version 1.1
+### Version 1.1.0
+
#### User changes
* Experimentally support list selection on categorical axes [#577][]
* Support Python 3.8 on Apple Silicon [#600][]
@@ -15,7 +48,7 @@
#### Developer changes
* Test on Python 3.10 beta releases [#600][]
* Provide a CMakeLists for quick standalone Boost.Histogram C++ experiments [#591][]
-* Adding logging with PyTest failure output [#575][]
+* Adding logging with pytest failure output [#575][]
[#575]: https://github.com/scikit-hep/boost-histogram/pull/575
[#576]: https://github.com/scikit-hep/boost-histogram/pull/576
@@ -130,7 +163,7 @@ Python 2, and mostly equivalent in API to 1.0.
* Supports converting user histogram objects that provide a
`_to_boost_histogram_` method. [#483][]
* A `view=True` parameter must now be passed to get a View instead of a standard
- NumPy values arrray from `to_numpy()`. [#498][]
+ NumPy values array from `to_numpy()`. [#498][]
#### Bug fixes
@@ -484,7 +517,7 @@ library. Using Boost 1.72 release.
* Properties on accumulator views now resolve correctly [#273][]
* Division of a histogram by a number is supported again [#278][]
* Setting a histogram with length one slice fixed [#279][]
-* Numpy functions now work with Numpy ints in `bins=` [#282][]
+* NumPy functions now work with NumPy ints in `bins=` [#282][]
* In-place addition avoids a copy [#284][]
[#273]: https://github.com/scikit-hep/boost-histogram/pull/273
@@ -524,7 +557,7 @@ transition existing 0.5.x code to the new API.
* `h.axes` now has the functions from axis as well. [#183][]
* `bh.project` has become `bh.sum` [#185][]
* `.reduce(...)` and the reducers in `bh.algorithm` have been removed in favor of dictionary based UHI slicing [#259][]
-* `bh.numpy` module interface updates, `histogram=bh.Histogram` replaces cryptic `bh=True`, and `density=True` is now supported in Numpy mode [#256][]
+* `bh.numpy` module interface updates, `histogram=bh.Histogram` replaces cryptic `bh=True`, and `density=True` is now supported in NumPy mode [#256][]
* Added `hist.copy()` [#218][] and `hist.shape` [#264][]
* Signatures are much nicer in Python 3 [#188][]
* Reprs are better, various properties like `__module__` are now set correctly [#200][]
diff --git a/docs/index.rst b/docs/index.rst
index 14b9392f..8a7d2f1e 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -11,8 +11,8 @@
Welcome to boost-histogram's documentation!
===========================================
-|Gitter| |Build Status| |Actions Status| |Documentation Status| |DOI|
-|Code style: black| |PyPI version| |Conda-Forge| |Scikit-HEP|
+|Actions Status| |Documentation Status| |Code style: black| |PyPI version| |Conda-Forge|
+|PyPI platforms| |DOI| |GitHub Discussion| |Gitter| |Scikit-HEP|
Boost-histogram (`source `__) is
a Python package providing Python bindings for Boost.Histogram_ (`source
@@ -43,7 +43,7 @@ virtual environment, etc. See :ref:`usage-installation` for more details. An exa
hist.fill([.3, .5, .2],
[.1, .4, .9])
- # Numpy array view into histogram counts, no overflow bins
+ # NumPy array view into histogram counts, no overflow bins
counts = hist.view()
See :ref:`usage-quickstart` for more.
@@ -106,21 +106,23 @@ Indices and tables
* :ref:`search`
.. _Boost.Histogram: https://www.boost.org/doc/libs/release/libs/histogram/doc/html/index.html
-.. |Gitter| image:: https://badges.gitter.im/HSF/PyHEP-histogramming.svg
- :target: https://gitter.im/HSF/PyHEP-histogramming?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge
-.. |Build Status| image:: https://dev.azure.com/scikit-hep/boost-histogram/_apis/build/status/bh-tests?branchName=develop
- :target: https://dev.azure.com/scikit-hep/boost-histogram/_build/latest?definitionId=2&branchName=develop
.. |Actions Status| image:: https://github.com/scikit-hep/boost-histogram/workflows/Tests/badge.svg
:target: https://github.com/scikit-hep/boost-histogram/actions
.. |Documentation Status| image:: https://readthedocs.org/projects/boost-histogram/badge/?version=latest
:target: https://boost-histogram.readthedocs.io/en/latest/?badge=latest
-.. |DOI| image:: https://zenodo.org/badge/148885351.svg
- :target: https://zenodo.org/badge/latestdoi/148885351
.. |Code style: black| image:: https://img.shields.io/badge/code%20style-black-000000.svg
:target: https://github.com/ambv/black
.. |PyPI version| image:: https://badge.fury.io/py/boost-histogram.svg
- :target: https://pypi.org/project/boost-histogram/
+ :target: https://pypi.org/project/boost-histogram
.. |Conda-Forge| image:: https://img.shields.io/conda/vn/conda-forge/boost-histogram
:target: https://github.com/conda-forge/boost-histogram-feedstock
+.. |PyPI platforms| image:: https://img.shields.io/pypi/pyversions/boost-histogram
+ :target: https://pypi.org/project/boost-histogram/
+.. |DOI| image:: https://zenodo.org/badge/148885351.svg
+ :target: https://zenodo.org/badge/latestdoi/148885351
+.. |GitHub Discussion| image:: https://img.shields.io/static/v1?label=Discussions&message=Ask&color=blue&logo=github
+ :target: https://github.com/scikit-hep/boost-histogram/discussions
+.. |Gitter| image:: https://badges.gitter.im/HSF/PyHEP-histogramming.svg
+ :target: https://gitter.im/HSF/PyHEP-histogramming?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge
.. |Scikit-HEP| image:: https://scikit-hep.org/assets/images/Scikit--HEP-Project-blue.svg
:target: https://scikit-hep.org/
diff --git a/docs/usage/accumulators.rst b/docs/usage/accumulators.rst
index ab182ff7..98e3b676 100644
--- a/docs/usage/accumulators.rst
+++ b/docs/usage/accumulators.rst
@@ -37,14 +37,14 @@ showing how non-accurate sums fail to produce the obvious answer, 2.0::
print(f"{sum(values) = } (simple)")
print(f"{math.fsum(values) = }")
print(f"{np.sum(values) = } (pairwise)")
- print(f{bh.accumulators.Sum().fill(values) = }")
+ print(f"{bh.accumulators.Sum().fill(values) = }")
.. code:: text
- sum(values) = 0.0
+ sum(values) = 0.0 (simple)
math.fsum(values) = 2.0
- np.sum(values) = 0.0
- bh.accumulators.Sum().fill(values) = Sum(2)
+ np.sum(values) = 0.0 (pairwise)
+ bh.accumulators.Sum().fill(values) = Sum(0 + 2)
Note that this is still intended for performance and does not guarantee
@@ -53,12 +53,12 @@ orders of values::
values = [1., 1e100, 1e50, 1., -1e50, -1e100]
print(f"{math.fsum(values) = }")
- print(f{bh.accumulators.Sum().fill(values) = }")
+ print(f"{bh.accumulators.Sum().fill(values) = }")
.. code:: text
math.fsum(values) = 2.0
- bh.accumulators.Sum().fill(values) = Sum(0)
+ bh.accumulators.Sum().fill(values) = Sum(0 + 0)
You should note that this is a highly contrived example and the Sum accumulator
should still outperform simple and pairwise summation methods for a minimal
@@ -156,7 +156,11 @@ Views
-----
Most of the accumulators (except Sum) support a View. This is what is returned from
-a histogram when ``.view()`` is requested. This is a structured Numpy ndarray, with a few small
-additions to make them easier to work with. Like a Numpy recarray, you can access the fields with
+a histogram when ``.view()`` is requested. This is a structured NumPy ndarray, with a few small
+additions to make them easier to work with. Like a NumPy recarray, you can access the fields with
attributes; you can even access (but not set) computed attributes like ``.variance``. A view will
-also return an accumulator instance if you select a single item.
+also return an accumulator instance if you select a single item. You can set a view's contents
+with a stacked array, and each item in the stack will be used for the (computed) values that a
+normal constructor would take. For example, WeightedMean can take an array whose final
+dimension has length four, with ``sum_of_weights``, ``sum_of_weights_squared``, ``value``, and ``variance``
+elements, even though several of these values are computed from the internal representation.
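A minimal sketch of the attribute access described above, using a Weight() storage histogram; the bin count and fill data here are illustrative, not taken from the docs:

    import boost_histogram as bh

    h = bh.Histogram(bh.axis.Regular(4, 0, 1), storage=bh.storage.Weight())
    h.fill([0.1, 0.2, 0.2, 0.7], weight=[1, 2, 1, 3])

    v = h.view()        # a structured NumPy subclass (WeightedSumView)
    print(v.value)      # plain field, can also be assigned to
    print(v.variance)   # a real field here; computed (read-only) on Mean views
    print(v[0])         # a single element comes back as an accumulator instance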
diff --git a/docs/usage/analyses.rst b/docs/usage/analyses.rst
index e6c3811a..305f4ffd 100644
--- a/docs/usage/analyses.rst
+++ b/docs/usage/analyses.rst
@@ -36,7 +36,7 @@ True ``is_valid`` selection, you can use a ``sum``:
.. code:: python3
- h1 = hist[:, True, :: bh.sum]
+ h1 = hist[:, True, sum]
You can expand this example to any number of dimensions, boolean flags,
and categories.
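A self-contained sketch of the selection above, assuming a three-axis histogram whose middle axis is a boolean ``is_valid`` flag (the axes and fill data are illustrative):

    import boost_histogram as bh

    hist = bh.Histogram(
        bh.axis.Regular(10, 0, 1),       # value
        bh.axis.Boolean(),               # is_valid
        bh.axis.IntCategory([1, 2, 3]),  # category
    )
    hist.fill([0.1, 0.5, 0.9], [True, True, False], [1, 2, 2])

    # Keep only the valid entries and sum away the category axis
    h1 = hist[:, True, sum]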
diff --git a/docs/usage/histogram.rst b/docs/usage/histogram.rst
index 4f6369a1..ed7d6e6a 100644
--- a/docs/usage/histogram.rst
+++ b/docs/usage/histogram.rst
@@ -19,13 +19,25 @@ The summing accumulators (not ``Mean()`` and ``WeightedMean())``) support thread
Data
^^^^
-The primary value from a histogram is always available as ``.value()``. The variance is available as ``.variances()``, unless you fill an unweighed histogram with weights, which will cause this to be return None, since the variance is no longer computable (use a weighted storage instead if you need the variances). The counts are available as ``.counts()``. If the histogram is weighted, ``.counts()`` returns the effective counts; see `UHI `_ for details.
+The primary values from a histogram are always available as ``.values()``. The variances are available as ``.variances()``, unless you fill an unweighted histogram with weights, which will cause this to return None, since the variances are no longer computable (use a weighted storage instead if you need the variances). The counts are available as ``.counts()``. If the histogram is weighted, ``.counts()`` returns the effective counts; see `UHI `_ for details.
Views
^^^^^
-While Histograms do conform to the Python buffer protocol, the best way to get access to the raw contents of a histogram as a NumPy array is with ``.view()``. This way you can optionally pass ``flow=True`` to get the flow bins, and if you have an accumulator storage, you will get a View, which is a slightly augmented ndarrray subclass (see :ref:`usage-accumulators`).
+While Histograms do conform to the Python buffer protocol, the best way to get access to the raw contents of a histogram as a NumPy array is with ``.view()``. This way you can optionally pass ``flow=True`` to get the flow bins, and if you have an accumulator storage, you will get a View, which is a slightly augmented ndarray subclass (see :ref:`usage-accumulators`). Views also support setting non-computed properties; you can use an expression like this to set the values of an accumulator storage:
+.. code:: python3
+
+ h.view().value = values
+
+
+You can also use stacked arrays (N+1 dimensional) to set a histogram's contents. This is especially useful if you need to set a computed value, like variance on a Mean/WeightedMean storage, which cannot be set using the above method:
+
+.. code:: python3
+
+ h[...] = np.stack([values, variances], axis=-1)
+
+If you leave endpoints off (such as with ``...`` above), then you can match the size with or without flow bins.
Operations
^^^^^^^^^^
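A minimal sketch of the two setting styles added above, assuming a 1D ``Weight()`` storage histogram whose arrays match the non-flow shape:

    import boost_histogram as bh
    import numpy as np

    h = bh.Histogram(bh.axis.Regular(10, 0, 1), storage=bh.storage.Weight())
    values = np.arange(10, dtype=float)
    variances = np.ones(10)

    # Set a single non-computed field through the view
    h.view().value = values

    # Or set everything at once; the last dimension supplies the fields
    h[...] = np.stack([values, variances], axis=-1)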
diff --git a/docs/usage/installation.rst b/docs/usage/installation.rst
index c44e067a..2f2da569 100644
--- a/docs/usage/installation.rst
+++ b/docs/usage/installation.rst
@@ -79,7 +79,7 @@ are dictated by Boost.Histogram's C++ requirements: gcc >= 5.5, clang >=
3.8, msvc >= 14.1. You should have a version of pip less than 2-3 years
old (10+).
-Numpy is downloaded during the build (enables multithreaded builds).
+NumPy is downloaded during the build (enables multithreaded builds).
Boost is not required or needed (this only depends on included
header-only dependencies). This library is under active development; you
can install directly from GitHub if you would like.
diff --git a/docs/usage/quickstart.rst b/docs/usage/quickstart.rst
index df804776..23578ecf 100644
--- a/docs/usage/quickstart.rst
+++ b/docs/usage/quickstart.rst
@@ -64,10 +64,10 @@ should give arrays, but single values work as well:
Slicing and rebinning
---------------------
-You can slice into a histogram using bin coordinates
-or data coordinates using ``bh.loc(v)``. You can also
-rebin with ``bh.rebin(n)`` or remove an entire axis
-using ``bh.sum`` as the third slice argument:
+You can slice into a histogram using bin coordinates or data coordinates using
+``bh.loc(v)``. You can also rebin with ``bh.rebin(n)`` or remove an entire axis
+using ``sum`` (technically as the third slice argument, though it is allowed by
+itself as well):
.. code:: python3
@@ -76,7 +76,7 @@ using ``bh.sum`` as the third slice argument:
bh.axis.Regular(10, 0, 1),
bh.axis.Regular(10, 0, 1),
)
- mini = hist[1:5, bh.loc(0.2) : bh.loc(0.9), :: bh.sum]
+ mini = hist[1:5, bh.loc(0.2) : bh.loc(0.9), sum]
# Will be 4 bins x 7 bins
See :ref:`usage-indexing`.
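A short sketch of the slicing and rebinning described above (edges and fill data are illustrative):

    import boost_histogram as bh

    h = bh.Histogram(bh.axis.Regular(10, 0, 1))
    h.fill([0.15, 0.25, 0.25, 0.8])

    h_cut = h[bh.loc(0.2) : bh.loc(0.9)]  # slice in data coordinates
    h_merged = h[:: bh.rebin(2)]          # merge pairs of bins -> 5 bins
    total = h[::sum]                      # reduce the whole axis (flow included)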
@@ -84,7 +84,7 @@ See :ref:`usage-indexing`.
Accessing the contents
----------------------
-You can use ``hist.values()`` to get a Numpy array from any histogram. You can
+You can use ``hist.values()`` to get a NumPy array from any histogram. You can
get the variances with ``hist.variances()``, though if you fill an unweighted
storage with weights, this will return None, as you no longer can compute the
variances correctly (please use a weighted storage if you need to). You can
diff --git a/docs/usage/storage.rst b/docs/usage/storage.rst
index 97e5ad64..4ca0e660 100644
--- a/docs/usage/storage.rst
+++ b/docs/usage/storage.rst
@@ -3,7 +3,7 @@
Storages
========
-There are several storage to choose from. To select a storage, pass the
+There are several storages to choose from. To select a storage, pass the
``storage=bh.storage.`` argument when making a histogram.
Simple storages
diff --git a/docs/usage/transforms.rst b/docs/usage/transforms.rst
index e7ff8f1c..cb871dda 100644
--- a/docs/usage/transforms.rst
+++ b/docs/usage/transforms.rst
@@ -38,7 +38,7 @@ transformed axis, and this will be 15-90 times slower than a compiled method, li
10, 1, 4, transform=bh.axis.transform.Function(ftype(math.log), ftype(math.exp))
)
- # Pure Python: Numpy (90x slower)
+ # Pure Python: NumPy (90x slower)
bh.axis.Regular(
10, 1, 4, transform=bh.axis.transform.Function(ftype(np.log), ftype(np.exp))
)
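For reference, a self-contained version of the ctypes variant shown above; it assumes ``ftype`` is the double(double) callback type defined earlier on this page:

    import ctypes
    import math

    import boost_histogram as bh

    ftype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)

    # Pure Python transform: works, but much slower than a compiled callback
    ax = bh.axis.Regular(
        10, 1, 4, transform=bh.axis.transform.Function(ftype(math.log), ftype(math.exp))
    )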
@@ -152,7 +152,7 @@ That's it.
Using Numba
^^^^^^^^^^^
-The same procedure works for numba decorators. Numpy only supports functions, not builtins like ``math.log``,
+The same procedure works for numba decorators. Numba only supports functions, not builtins like ``math.log``,
so if you want to pass those, you'll need to wrap them in a lambda function or add a bit of logic to the convert
function. Here are your options:
diff --git a/examples/simple_2d.py b/examples/simple_2d.py
index e6372c63..931a9da6 100755
--- a/examples/simple_2d.py
+++ b/examples/simple_2d.py
@@ -10,12 +10,12 @@
bh.axis.Regular(20, -3, 3, metadata="x"), bh.axis.Regular(20, -3, 3, metadata="y")
)
-# Generate some Numpy arrays with data to fill into histogram,
+# Generate some NumPy arrays with data to fill into histogram,
# in this case normal distributed random numbers in x and y
x = np.random.randn(1_000)
y = 0.5 * np.random.randn(1_000)
-# Fill histogram with Numpy arrays, this is very fast
+# Fill histogram with NumPy arrays, this is very fast
h.fill(x, y)
# Get numpy.histogram compatible representation of the histogram
diff --git a/examples/simple_numpy.py b/examples/simple_numpy.py
index 3cc41515..6e858f6e 100755
--- a/examples/simple_numpy.py
+++ b/examples/simple_numpy.py
@@ -10,7 +10,7 @@
bh.axis.Regular(10, -3, 3, metadata="x"), bh.axis.Regular(10, -3, 3, metadata="y")
)
-# Generate some Numpy arrays with data to fill into histogram,
+# Generate some NumPy arrays with data to fill into histogram,
# in this case normal distributed random numbers in x and y
x_data = np.random.randn(1000)
y_data = 0.5 * np.random.randn(1000)
@@ -18,7 +18,7 @@
# Fill histogram with numpy arrays, this is very fast
h.fill(x_data, y_data)
-# Get representations of the bin edges as Numpy arrays
+# Get representations of the bin edges as NumPy arrays
x = h.axes[0].edges
y = h.axes[1].edges
diff --git a/extern/assert b/extern/assert
index 2cd99e24..6aabfeba 160000
--- a/extern/assert
+++ b/extern/assert
@@ -1 +1 @@
-Subproject commit 2cd99e24e834d600d51cf3b8cdc3df6ecfd8e9f1
+Subproject commit 6aabfebae6d4acf996fe711de4e9b444ad88c17d
diff --git a/extern/config b/extern/config
index fad75493..088b79a0 160000
--- a/extern/config
+++ b/extern/config
@@ -1 +1 @@
-Subproject commit fad75493bd2da03524854976f2d80dd7989c90fb
+Subproject commit 088b79a0ca751932010f82d3f95457c8b483fb9b
diff --git a/extern/core b/extern/core
index ca489306..f4b3d5db 160000
--- a/extern/core
+++ b/extern/core
@@ -1 +1 @@
-Subproject commit ca4893063fa244a690b1f799328f16408b794c77
+Subproject commit f4b3d5dba6f86caaf96e45901655a954b2ff68b4
diff --git a/extern/histogram b/extern/histogram
index 7c24d268..4a10c2c1 160000
--- a/extern/histogram
+++ b/extern/histogram
@@ -1 +1 @@
-Subproject commit 7c24d2683e6be78cda90dcc0026ceeb7b5131507
+Subproject commit 4a10c2c11b272f435f61233eb9f585604476b2f8
diff --git a/extern/mp11 b/extern/mp11
index 21cace4e..9d43d1f6 160000
--- a/extern/mp11
+++ b/extern/mp11
@@ -1 +1 @@
-Subproject commit 21cace4e574180ba64d9307a5e4ea9e5e94d3e8d
+Subproject commit 9d43d1f69660617266c9168e6e121ab2b0ea2287
diff --git a/extern/throw_exception b/extern/throw_exception
index da6f5420..95e02ea5 160000
--- a/extern/throw_exception
+++ b/extern/throw_exception
@@ -1 +1 @@
-Subproject commit da6f5420fe7e96f673ee185c2ff37cbaa3f95b84
+Subproject commit 95e02ea52b8525ecf34dbf1e7fae34af09986b8a
diff --git a/extern/variant2 b/extern/variant2
index 1ebc29aa..4153a535 160000
--- a/extern/variant2
+++ b/extern/variant2
@@ -1 +1 @@
-Subproject commit 1ebc29aa0277cbb284a70327b5af14519f2702bc
+Subproject commit 4153a535a0fa8eb4d18abc262fcf2ae834601261
diff --git a/include/bh_python/accumulators/ostream.hpp b/include/bh_python/accumulators/ostream.hpp
index 03e25158..2140d5a2 100644
--- a/include/bh_python/accumulators/ostream.hpp
+++ b/include/bh_python/accumulators/ostream.hpp
@@ -79,7 +79,7 @@ std::basic_ostream& operator<<(std::basic_ostream&
template
std::basic_ostream&
operator<<(std::basic_ostream& os,
- const ::boost::histogram::accumulators::thread_safe& x) {
+ const ::boost::histogram::accumulators::count& x) {
os << x.load();
return os;
}
diff --git a/include/bh_python/histogram.hpp b/include/bh_python/histogram.hpp
index 0d26afb0..3199ff45 100644
--- a/include/bh_python/histogram.hpp
+++ b/include/bh_python/histogram.hpp
@@ -20,8 +20,8 @@ namespace pybind11 {
/// The descriptor for atomic_* is the same as the descriptor for *, as long this uses
/// standard layout
template
-struct format_descriptor> : format_descriptor {
- static_assert(std::is_standard_layout>::value, "");
+struct format_descriptor> : format_descriptor {
+ static_assert(std::is_standard_layout>::value, "");
};
} // namespace pybind11
@@ -31,11 +31,11 @@ namespace detail {
template
py::buffer_info make_buffer_impl(const Axes& axes, bool flow, T* ptr) {
// strides are in bytes
- auto shape = bh::detail::make_stack_buffer(axes);
- auto strides = bh::detail::make_stack_buffer(axes);
- ssize_t stride = sizeof(T);
- unsigned rank = 0;
- char* start = reinterpret_cast(ptr);
+ auto shape = bh::detail::make_stack_buffer(axes);
+ auto strides = bh::detail::make_stack_buffer(axes);
+ py::ssize_t stride = sizeof(T);
+ unsigned rank = 0;
+ char* start = reinterpret_cast(ptr);
bh::detail::for_each_axis(axes, [&](const auto& axis) {
const bool underflow
= bh::axis::traits::options(axis) & bh::axis::option::underflow;
diff --git a/include/bh_python/pybind11.hpp b/include/bh_python/pybind11.hpp
index db948049..c46e9be9 100644
--- a/include/bh_python/pybind11.hpp
+++ b/include/bh_python/pybind11.hpp
@@ -45,7 +45,7 @@ std::string shift_to_string(const T& x) {
}
template
-void unchecked_set_impl(std::true_type, py::tuple& tup, ssize_t i, Obj&& obj) {
+void unchecked_set_impl(std::true_type, py::tuple& tup, py::ssize_t i, Obj&& obj) {
// PyTuple_SetItem steals a reference to 'val'
if(PyTuple_SetItem(tup.ptr(), i, obj.release().ptr()) != 0) {
throw py::error_already_set();
@@ -53,7 +53,7 @@ void unchecked_set_impl(std::true_type, py::tuple& tup, ssize_t i, Obj&& obj) {
}
template
-void unchecked_set_impl(std::false_type, py::tuple& tup, ssize_t i, T&& t) {
+void unchecked_set_impl(std::false_type, py::tuple& tup, py::ssize_t i, T&& t) {
unchecked_set_impl(std::true_type{}, tup, i, py::cast(std::forward(t)));
}
@@ -62,6 +62,6 @@ template
void unchecked_set(py::tuple& tup, std::size_t i, T&& t) {
unchecked_set_impl(std::is_base_of>{},
tup,
- static_cast(i),
+ static_cast(i),
std::forward(t));
}
diff --git a/include/bh_python/register_histogram.hpp b/include/bh_python/register_histogram.hpp
index 68ab7a78..acd41b47 100644
--- a/include/bh_python/register_histogram.hpp
+++ b/include/bh_python/register_histogram.hpp
@@ -97,6 +97,9 @@ auto register_histogram(py::module& m, const char* name, const char* desc) {
def_optionally(hist,
bh::detail::has_operator_rmul{},
py::self *= py::self);
+ def_optionally(hist,
+ bh::detail::has_operator_rsub{},
+ py::self -= py::self);
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
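The ``py::self -= py::self`` registration above is what backs the histogram subtraction listed in the 1.2.0 changelog; a minimal sketch of the Python-level effect (axes and fills are illustrative):

    import boost_histogram as bh

    h1 = bh.Histogram(bh.axis.Regular(5, 0, 1))
    h2 = bh.Histogram(bh.axis.Regular(5, 0, 1))
    h1.fill([0.1, 0.3, 0.3])
    h2.fill([0.3])

    diff = h1 - h2  # also available in place as h1 -= h2
    print(diff.view())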
diff --git a/include/bh_python/storage.hpp b/include/bh_python/storage.hpp
index bcbdd5bd..a4027994 100644
--- a/include/bh_python/storage.hpp
+++ b/include/bh_python/storage.hpp
@@ -11,7 +11,7 @@
#include
#include
-#include
+#include
#include
#include
@@ -22,8 +22,8 @@
namespace storage {
// Names match Python names
-using int64 = bh::dense_storage;
-using atomic_int64 = bh::dense_storage>;
+using int64 = bh::dense_storage;
+using atomic_int64 = bh::dense_storage>;
using double_ = bh::dense_storage;
using unlimited = bh::unlimited_storage<>;
using weight = bh::dense_storage>;
@@ -81,7 +81,11 @@ void save(Archive& ar, const storage::atomic_int64& s, unsigned /* version */) {
// We cannot view the memory as a numpy array, because the internal layout of
// std::atomic is undefined. So no reinterpret_casts are allowed.
py::array_t a(static_cast(s.size()));
- std::copy(s.begin(), s.end(), a.mutable_data());
+
+ auto in_ptr = s.begin();
+ auto out_ptr = a.mutable_data();
+ for(; in_ptr != s.end(); ++in_ptr, ++out_ptr)
+ *out_ptr = in_ptr->value();
ar << a;
}
@@ -187,7 +191,7 @@ struct type_caster {
auto ptr = PyNumber_Long(src.ptr());
if(!ptr)
return false;
- value.store(PyLong_AsUnsignedLongLong(ptr));
+ value = PyLong_AsLongLong(ptr);
Py_DECREF(ptr);
return !PyErr_Occurred();
}
@@ -195,7 +199,7 @@ struct type_caster {
static handle cast(storage::atomic_int64::value_type src,
return_value_policy /* policy */,
handle /* parent */) {
- return PyLong_FromUnsignedLongLong(src.load());
+ return PyLong_FromLongLong(src.value());
}
};
} // namespace detail
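The storage changes above are what make integer histograms signed (per the changelog); a small sketch of the visible effect, with the expected dtype noted as an assumption rather than verified output:

    import boost_histogram as bh

    h = bh.Histogram(bh.axis.Regular(4, 0, 1), storage=bh.storage.Int64())
    h.fill([0.1, 0.6, 0.6])

    # With this patch the counter type is signed, so the view dtype
    # should now be int64 rather than the previous unsigned type.
    print(h.view().dtype)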
diff --git a/include/bh_python/transform.hpp b/include/bh_python/transform.hpp
index 654ca392..3c86c12e 100644
--- a/include/bh_python/transform.hpp
+++ b/include/bh_python/transform.hpp
@@ -25,7 +25,7 @@ struct func_transform {
py::object _forward_converted; // Held for reference counting if conversion makes a
// new object (ctypes does not bump the refcount)
py::object _inverse_converted;
- py::object _convert_ob; // Called before computing tranform if not None
+ py::object _convert_ob; // Called before computing transform if not None
py::str _name; // Optional name (uses repr from objects otherwise)
/// Convert an object into a std::function. Can handle ctypes
@@ -84,7 +84,7 @@ struct func_transform {
src);
}
- // Note that each error is slighly different just to help with debugging
+ // Note that each error is slightly different just to help with debugging
throw py::type_error("Only ctypes double(double) and C++ functions allowed "
"(must be stateless)");
}
diff --git a/notebooks/BoostHistogramHandsOn.ipynb b/notebooks/BoostHistogramHandsOn.ipynb
index e6c9acd2..62c9ebe0 100644
--- a/notebooks/BoostHistogramHandsOn.ipynb
+++ b/notebooks/BoostHistogramHandsOn.ipynb
@@ -231,7 +231,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## 2: Drop-in replacement for Numpy\n",
+ "## 2: Drop-in replacement for NumPy\n",
"\n",
"To start using this yourself, you don't even need to change your code. Let's try the numpy adapters."
]
@@ -330,7 +330,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Now we can move over to boost-histogram one step at a time! Just to be complete, we can also go back to a Numpy tuple from a Histogram object:"
+ "Now we can move over to boost-histogram one step at a time! Just to be complete, we can also go back to a NumPy tuple from a Histogram object:"
]
},
{
@@ -948,7 +948,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Let's try to make a density histogram like Numpy's."
+ "Let's try to make a density histogram like NumPy's."
]
},
{
diff --git a/notebooks/PerformanceComparison.ipynb b/notebooks/PerformanceComparison.ipynb
index 998c425c..361fb265 100644
--- a/notebooks/PerformanceComparison.ipynb
+++ b/notebooks/PerformanceComparison.ipynb
@@ -81,7 +81,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "#### Traditional 1D Numpy Histogram\n",
+ "#### Traditional 1D NumPy Histogram\n",
"\n",
"This is reasonably optimized; it should provide good perforance."
]
@@ -232,7 +232,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "#### Traditional 2D Numpy histogram\n",
+ "#### Traditional 2D NumPy histogram\n",
"\n",
"Not as well optimized for regular filling."
]
diff --git a/notebooks/ThreadedFills.ipynb b/notebooks/ThreadedFills.ipynb
index d39fc625..745fd93c 100644
--- a/notebooks/ThreadedFills.ipynb
+++ b/notebooks/ThreadedFills.ipynb
@@ -136,7 +136,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "This makes four seperate histograms, then fills them and adds at the end."
+ "This makes four separate histograms, then fills them and adds at the end."
]
},
{
diff --git a/notebooks/xarray.ipynb b/notebooks/xarray.ipynb
index afef4ecb..9746a489 100644
--- a/notebooks/xarray.ipynb
+++ b/notebooks/xarray.ipynb
@@ -235,7 +235,7 @@
"### More features\n",
"\n",
"Let's add a few more features to our function defined above.\n",
- "* Let's allow bins to be a list of axes or even a completly prepared histogram; this will allow us to take advantage of boost-histogram features later.\n",
+ "* Let's allow bins to be a list of axes or even a completely prepared histogram; this will allow us to take advantage of boost-histogram features later.\n",
"* Let's add a weights keyword so we can do weighted histograms as well."
]
},
diff --git a/noxfile.py b/noxfile.py
new file mode 100644
index 00000000..31225c32
--- /dev/null
+++ b/noxfile.py
@@ -0,0 +1,50 @@
+import nox
+
+ALL_PYTHONS = ["3.6", "3.7", "3.8", "3.9"]
+
+nox.options.sessions = ["lint", "tests"]
+
+
+@nox.session(python=ALL_PYTHONS)
+def tests(session):
+ """
+ Run the unit and regular tests.
+ """
+ session.install(".[test]")
+ session.run("pytest")
+
+
+@nox.session
+def docs(session):
+ """
+ Build the docs. Pass "serve" to serve.
+ """
+
+ session.chdir("docs")
+ session.install("-r", "requirements.txt")
+ session.run("sphinx-build", "-M", "html", ".", "_build")
+
+ if session.posargs:
+ if "serve" in session.posargs:
+ session.log("Launching docs at http://localhost:8000/ - use Ctrl-C to quit")
+ session.run("python", "-m", "http.server", "8000", "-d", "_build/html")
+ else:
+ session.error("Unsupported argument to docs")
+
+
+@nox.session
+def lint(session):
+ """
+ Run the linter.
+ """
+ session.install("pre-commit")
+ session.run("pre-commit", "run", "--all-files")
+
+
+@nox.session
+def make_pickle(session):
+ """
+ Make a pickle file for this version
+ """
+ session.install(".[dev]")
+ session.run("python", "tests/pickles/make_pickle.py", *session.posargs)
diff --git a/pyproject.toml b/pyproject.toml
index 35b1759f..2dce28cf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,9 +1,8 @@
[build-system]
requires = [
- "setuptools>=42",
+ "setuptools>=45",
"wheel",
- "toml",
- "setuptools_scm>=4.1.2"
+ "setuptools_scm[toml]>=4.1.2"
]
build-backend = "setuptools.build_meta"
@@ -12,7 +11,7 @@ write_to = "src/boost_histogram/version.py"
[tool.pytest.ini_options]
junit_family = "xunit2"
-addopts = "--benchmark-disable -Wd --strict-markers"
+addopts = "--benchmark-disable -Wd --strict-markers -ra"
xfail_strict = true
testpaths = ["tests"]
required_plugins = ["pytest-benchmark"]
@@ -85,5 +84,5 @@ ignore = [
[tool.cibuildwheel]
test-extras = "test"
test-command = "pytest {project}/tests"
-test-skip = ["pp*macos*", "pp*win*", "*universal2:arm64"]
+test-skip = ["pp*macos*", "pp*win*", "*universal2:arm64", "cp310*macos*"]
skip = ["pp*-manylinux_i686"] # not supported by NumPy
diff --git a/scripts/performance_report.py b/scripts/performance_report.py
index 93877a01..93e869a8 100644
--- a/scripts/performance_report.py
+++ b/scripts/performance_report.py
@@ -72,7 +72,7 @@ def print_timer(setup, statement, name, storage, fill, flow, base=None, n=10):
base = print_timer(
setup_1d,
"h, _ = np.histogram(vals, bins=bins, range=ranges)",
- name="Numpy",
+ name="NumPy",
storage="uint64",
fill="",
flow="no",
@@ -163,7 +163,7 @@ def print_timer(setup, statement, name, storage, fill, flow, base=None, n=10):
base = print_timer(
setup_2d,
"H, *ledges = np.histogram2d(*vals, bins=bins, range=ranges)",
- name="Numpy",
+ name="NumPy",
storage="uint64",
fill="",
flow="no",
diff --git a/setup.cfg b/setup.cfg
index 5e934a87..ae0b2dd0 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -28,6 +28,7 @@ classifiers =
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Scientific/Engineering
diff --git a/src/boost_histogram/_core/algorithm.pyi b/src/boost_histogram/_core/algorithm.pyi
index c04ad15d..32fbadbb 100644
--- a/src/boost_histogram/_core/algorithm.pyi
+++ b/src/boost_histogram/_core/algorithm.pyi
@@ -2,6 +2,7 @@ import enum
import typing
class reduce_command:
+ iaxis: int
def __repr__(self) -> str: ...
class slice_mode(enum.Enum):
diff --git a/src/boost_histogram/_core/axis/__init__.pyi b/src/boost_histogram/_core/axis/__init__.pyi
index 5947ee9b..63f24a4f 100644
--- a/src/boost_histogram/_core/axis/__init__.pyi
+++ b/src/boost_histogram/_core/axis/__init__.pyi
@@ -34,13 +34,13 @@ class _BaseAxis:
@property
def extent(self) -> int: ...
@property
- def edges(self) -> np.ndarray: ...
+ def edges(self) -> "np.typing.NDArray[Any]": ...
@property
- def centers(self) -> np.ndarray: ...
+ def centers(self) -> "np.typing.NDArray[Any]": ...
@property
- def widths(self) -> np.ndarray: ...
- def index(self, arg0: ArrayLike) -> int | np.ndarray: ...
- def value(self, arg0: ArrayLike) -> float | np.ndarray: ...
+ def widths(self) -> "np.typing.NDArray[Any]": ...
+ def index(self, arg0: ArrayLike) -> int | "np.typing.NDArray[Any]": ...
+ def value(self, arg0: ArrayLike) -> float | "np.typing.NDArray[Any]": ...
class _BaseRegular(_BaseAxis):
def __init__(self, bins: int, start: float, stop: float) -> None: ...
diff --git a/src/boost_histogram/_core/hist.pyi b/src/boost_histogram/_core/hist.pyi
index 8fc5d48e..523f3741 100644
--- a/src/boost_histogram/_core/hist.pyi
+++ b/src/boost_histogram/_core/hist.pyi
@@ -22,8 +22,8 @@ class _BaseHistogram:
def __copy__(self: T) -> T: ...
def __deepcopy__(self: T, memo: Any) -> T: ...
def __iadd__(self: T, other: _BaseHistogram) -> T: ...
- def to_numpy(self, flow: bool = ...) -> Tuple[np.ndarray, ...]: ...
- def view(self, flow: bool = ...) -> np.ndarray: ...
+ def to_numpy(self, flow: bool = ...) -> Tuple["np.typing.NDArray[Any]", ...]: ...
+ def view(self, flow: bool = ...) -> "np.typing.NDArray[Any]": ...
def axis(self, i: int = ...) -> axis._BaseAxis: ...
def fill(self, *args: ArrayLike, weight: ArrayLike | None = ...) -> None: ...
def empty(self, flow: bool = ...) -> bool: ...
diff --git a/src/boost_histogram/_internal/axestuple.py b/src/boost_histogram/_internal/axestuple.py
index 549cdb39..d2fb31d4 100644
--- a/src/boost_histogram/_internal/axestuple.py
+++ b/src/boost_histogram/_internal/axestuple.py
@@ -4,7 +4,7 @@
import numpy as np
from .axis import Axis
-from .utils import set_module
+from .utils import set_module, zip_strict
A = TypeVar("A", bound="ArrayTuple")
@@ -17,12 +17,12 @@ class ArrayTuple(tuple): # type: ignore
def __getattr__(self, name: str) -> Any:
if name in self._REDUCTIONS:
- return partial(getattr(np, name), np.broadcast_arrays(*self))
+ return partial(getattr(np, name), np.broadcast_arrays(*self)) # type: ignore
else:
return self.__class__(getattr(a, name) for a in self)
def __dir__(self) -> List[str]:
        names = dir(self.__class__) + dir(np.ndarray)
return sorted(n for n in names if not n.startswith("_"))
def __call__(self, *args: Any, **kwargs: Any) -> Any:
@@ -34,7 +34,7 @@ def broadcast(self: A) -> A:
Use this method to broadcast them out into their full memory
representation.
"""
- return self.__class__(np.broadcast_arrays(*self))
+ return self.__class__(np.broadcast_arrays(*self)) # type: ignore
B = TypeVar("B", bound="AxesTuple")
@@ -56,17 +56,17 @@ def extent(self) -> Tuple[int, ...]:
@property
def centers(self) -> ArrayTuple:
gen = (s.centers for s in self)
- return ArrayTuple(np.meshgrid(*gen, **self._MGRIDOPTS))
+ return ArrayTuple(np.meshgrid(*gen, **self._MGRIDOPTS)) # type: ignore
@property
def edges(self) -> ArrayTuple:
gen = (s.edges for s in self)
- return ArrayTuple(np.meshgrid(*gen, **self._MGRIDOPTS))
+ return ArrayTuple(np.meshgrid(*gen, **self._MGRIDOPTS)) # type: ignore
@property
def widths(self) -> ArrayTuple:
gen = (s.widths for s in self)
- return ArrayTuple(np.meshgrid(*gen, **self._MGRIDOPTS))
+ return ArrayTuple(np.meshgrid(*gen, **self._MGRIDOPTS)) # type: ignore
def value(self, *indexes: float) -> Tuple[float, ...]:
if len(indexes) != len(self):
@@ -97,7 +97,10 @@ def __getattr__(self, attr: str) -> Any:
return self.__class__(getattr(s, attr) for s in self)
def __setattr__(self, attr: str, values: Any) -> None:
- self.__class__(s.__setattr__(attr, v) for s, v in zip(self, values))
+ try:
+ return super().__setattr__(attr, values)
+ except AttributeError:
+ self.__class__(s.__setattr__(attr, v) for s, v in zip_strict(self, values))
value.__doc__ = Axis.value.__doc__
index.__doc__ = Axis.index.__doc__
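A sketch of the broadcast setter this now guards, assuming the usual metadata broadcasting through ``AxesTuple`` (the attribute and lengths are illustrative):

    import boost_histogram as bh

    h = bh.Histogram(bh.axis.Regular(4, 0, 1), bh.axis.Regular(2, 0, 1))

    # Broadcast one metadata value per axis
    h.axes.metadata = ("x", "y")

    # A mismatched length now raises instead of silently truncating
    try:
        h.axes.metadata = ("x",)
    except ValueError as err:
        print(err)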
diff --git a/src/boost_histogram/_internal/axis.py b/src/boost_histogram/_internal/axis.py
index 1ff830b1..818ffd86 100644
--- a/src/boost_histogram/_internal/axis.py
+++ b/src/boost_histogram/_internal/axis.py
@@ -138,10 +138,10 @@ def bin(self, index: float) -> Union[int, str, Tuple[float, float]]:
return self._ax.bin(index) # type: ignore
def __eq__(self, other: Any) -> bool:
- return self._ax == other._ax # type: ignore
+ return hasattr(other, "_ax") and self._ax == other._ax
def __ne__(self, other: Any) -> bool:
- return self._ax != other._ax # type: ignore
+ return (not hasattr(other, "_ax")) or self._ax != other._ax
@classmethod
def _convert_cpp(cls: Type[T], cpp_object: Any) -> T:
@@ -248,18 +248,18 @@ def __getitem__(self, i: AxCallOrInt) -> Union[int, str, Tuple[float, float]]:
return self.bin(i)
@property
- def edges(self) -> np.ndarray:
+ def edges(self) -> "np.typing.NDArray[Any]":
return self._ax.edges # type: ignore
@property
- def centers(self) -> np.ndarray:
+ def centers(self) -> "np.typing.NDArray[Any]":
"""
An array of bin centers.
"""
return self._ax.centers # type: ignore
@property
- def widths(self) -> np.ndarray:
+ def widths(self) -> "np.typing.NDArray[Any]":
"""
An array of bin widths.
"""
diff --git a/src/boost_histogram/_internal/hist.py b/src/boost_histogram/_internal/hist.py
index fa5ddb58..6e13b731 100644
--- a/src/boost_histogram/_internal/hist.py
+++ b/src/boost_histogram/_internal/hist.py
@@ -65,7 +65,9 @@
T = TypeVar("T")
-def _fill_cast(value: T, *, inner: bool = False) -> Union[T, np.ndarray, Tuple[T, ...]]:
+def _fill_cast(
+ value: T, *, inner: bool = False
+) -> Union[T, "np.typing.NDArray[Any]", Tuple[T, ...]]:
"""
Convert to NumPy arrays. Some buffer objects do not get converted by forcecast.
If not called by itself (inner=False), then will work through one level of tuple/list.
@@ -150,8 +152,8 @@ def __init__(self, *args: CppHistogram, metadata: Any = ...) -> None:
def __init__(
self,
*axes: Union[Axis, CppAxis],
- storage: Storage,
- metadata: Any = None,
+ storage: Storage = ...,
+ metadata: Any = ...,
) -> None:
...
@@ -297,13 +299,13 @@ def ndim(self) -> int:
def view(
self, flow: bool = False
- ) -> Union[np.ndarray, WeightedSumView, WeightedMeanView, MeanView]:
+ ) -> Union["np.typing.NDArray[Any]", WeightedSumView, WeightedMeanView, MeanView]:
"""
Return a view into the data, optionally with overflow turned on.
"""
return _to_view(self._hist.view(flow))
- def __array__(self) -> np.ndarray:
+ def __array__(self) -> "np.typing.NDArray[Any]":
return self.view(False)
def __eq__(self, other: Any) -> bool:
@@ -312,11 +314,15 @@ def __eq__(self, other: Any) -> bool:
def __ne__(self, other: Any) -> bool:
return (not hasattr(other, "_hist")) or self._hist != other._hist
- def __add__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __add__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
result = self.copy(deep=False)
return result.__iadd__(other)
- def __iadd__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __iadd__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
if isinstance(other, (int, float)) and other == 0:
return self
self._compute_inplace_op("__iadd__", other)
@@ -326,36 +332,69 @@ def __iadd__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
return self
- def __radd__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __radd__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
return self + other
+ def __sub__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
+ result = self.copy(deep=False)
+ return result.__isub__(other)
+
+ def __isub__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
+ if isinstance(other, (int, float)) and other == 0:
+ return self
+ self._compute_inplace_op("__isub__", other)
+
+ self.axes = self._generate_axes_()
+
+ return self
+
# If these fail, the underlying object throws the correct error
- def __mul__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __mul__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
result = self.copy(deep=False)
return result._compute_inplace_op("__imul__", other)
- def __rmul__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __rmul__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
return self * other
- def __truediv__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __truediv__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
result = self.copy(deep=False)
return result._compute_inplace_op("__itruediv__", other)
- def __div__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __div__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
result = self.copy(deep=False)
return result._compute_inplace_op("__idiv__", other)
- def __idiv__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __idiv__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
return self._compute_inplace_op("__idiv__", other)
- def __itruediv__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __itruediv__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
return self._compute_inplace_op("__itruediv__", other)
- def __imul__(self: H, other: Union["Histogram", np.ndarray, float]) -> H:
+ def __imul__(
+ self: H, other: Union["Histogram", "np.typing.NDArray[Any]", float]
+ ) -> H:
return self._compute_inplace_op("__imul__", other)
def _compute_inplace_op(
- self: H, name: str, other: Union["Histogram", np.ndarray, float]
+ self: H, name: str, other: Union["Histogram", "np.typing.NDArray[Any]", float]
) -> H:
# Also takes CppHistogram, but that confuses mypy because it's hard to pick out
if isinstance(other, Histogram):
@@ -442,26 +481,26 @@ def fill(
}:
raise RuntimeError("Mean histograms do not support threaded filling")
- data = [np.array_split(a, threads) for a in args_ars]
+ data = [np.array_split(a, threads) for a in args_ars] # type: ignore
if weight is None or np.isscalar(weight):
assert threads is not None
weights = [weight_ars] * threads
else:
- weights = np.array_split(weight_ars, threads)
+ weights = np.array_split(weight_ars, threads) # type: ignore
if sample_ars is None or np.isscalar(sample_ars):
assert threads is not None
samples = [sample_ars] * threads
else:
- samples = np.array_split(sample_ars, threads)
+ samples = np.array_split(sample_ars, threads) # type: ignore
if self._hist._storage_type is _core.storage.atomic_int64:
def fun(
weight: Optional[ArrayLike],
sample: Optional[ArrayLike],
- *args: np.ndarray,
+ *args: "np.typing.NDArray[Any]",
) -> None:
self._hist.fill(*args, weight=weight, sample=sample)
@@ -471,7 +510,7 @@ def fun(
def fun(
weight: Optional[ArrayLike],
sample: Optional[ArrayLike],
- *args: np.ndarray,
+ *args: "np.typing.NDArray[Any]",
) -> None:
local_hist = self._hist.__copy__()
local_hist.reset()
@@ -646,9 +685,12 @@ def _compute_commonindex(
def to_numpy(
self, flow: bool = False, *, dd: bool = False, view: bool = False
- ) -> Union[Tuple[np.ndarray, ...], Tuple[np.ndarray, Tuple[np.ndarray, ...]]]:
+ ) -> Union[
+ Tuple["np.typing.NDArray[Any]", ...],
+ Tuple["np.typing.NDArray[Any]", Tuple["np.typing.NDArray[Any]", ...]],
+ ]:
"""
- Convert to a Numpy style tuple of return arrays. Edges are converted to
+ Convert to a NumPy style tuple of return arrays. Edges are converted to
match NumPy standards, with upper edge inclusive, unlike
boost-histogram, where upper edge is exclusive.
@@ -799,8 +841,32 @@ def __getitem__( # noqa: C901
assert isinstance(stop, int)
slices.append(_core.algorithm.slice_and_rebin(i, start, stop, merge))
- logger.debug("Reduce with %s", slices)
- reduced = self._hist.reduce(*slices)
+ # Will be updated below
+ if slices or pick_set or pick_each or integrations:
+ reduced = self._hist
+ else:
+ logger.debug("Reduce actions are all empty, just making a copy")
+ reduced = copy.copy(self._hist)
+
+ if pick_each:
+ tuple_slice = tuple(
+ pick_each.get(i, slice(None)) for i in range(reduced.rank())
+ )
+ logger.debug("Slices for pick each: %s", tuple_slice)
+ axes = [
+ reduced.axis(i) for i in range(reduced.rank()) if i not in pick_each
+ ]
+ logger.debug("Axes: %s", axes)
+ new_reduced = reduced.__class__(axes)
+ new_reduced.view(flow=True)[...] = reduced.view(flow=True)[tuple_slice]
+ reduced = new_reduced
+ integrations = {i - sum(j <= i for j in pick_each) for i in integrations}
+ for slice_ in slices:
+ slice_.iaxis -= sum(j <= slice_.iaxis for j in pick_each)
+
+ if slices:
+ logger.debug("Reduce with %s", slices)
+ reduced = reduced.reduce(*slices)
if pick_set:
warnings.warn(
@@ -830,20 +896,6 @@ def __getitem__( # noqa: C901
new_reduced.view(flow=True)[...] = reduced_view
reduced = new_reduced
- if pick_each:
- tuple_slice = tuple(
- pick_each.get(i, slice(None)) for i in range(reduced.rank())
- )
- logger.debug("Slices for pick each: %s", tuple_slice)
- axes = [
- reduced.axis(i) for i in range(reduced.rank()) if i not in pick_each
- ]
- logger.debug("Axes: %s", axes)
- new_reduced = reduced.__class__(axes)
- new_reduced.view(flow=True)[...] = reduced.view(flow=True)[tuple_slice]
- reduced = new_reduced
- integrations = {i - sum(j <= i for j in pick_each) for i in integrations}
-
if integrations:
projections = [i for i in range(reduced.rank()) if i not in integrations]
reduced = reduced.project(*projections)
@@ -887,7 +939,7 @@ def __setitem__(
if (
value.ndim > 0
and len(view.dtype) > 0 # type: ignore
- and len(value.dtype) == 0 # type: ignore
+ and len(value.dtype) == 0
and len(view.dtype) == value.shape[-1] # type: ignore
):
value_shape = value.shape[:-1]
@@ -984,7 +1036,7 @@ def kind(self) -> Kind:
else:
return Kind.COUNT
- def values(self, flow: bool = False) -> np.ndarray:
+ def values(self, flow: bool = False) -> "np.typing.NDArray[Any]":
"""
Returns the accumulated values. The counts for simple histograms, the
sum of weights for weighted histograms, the mean for profiles, etc.
@@ -995,7 +1047,7 @@ def values(self, flow: bool = False) -> np.ndarray:
:param flow: Enable flow bins. Not part of PlottableHistogram, but
included for consistency with other methods and flexibility.
- :return: np.ndarray[np.float64]
+        :return: np.typing.NDArray[np.float64]
"""
view = self.view(flow)
@@ -1005,7 +1057,7 @@ def values(self, flow: bool = False) -> np.ndarray:
else:
return view.value # type: ignore
- def variances(self, flow: bool = False) -> Optional[np.ndarray]:
+ def variances(self, flow: bool = False) -> Optional["np.typing.NDArray[Any]"]:
"""
Returns the estimated variance of the accumulated values. The sum of squared
weights for weighted histograms, the variance of samples for profiles, etc.
@@ -1026,7 +1078,7 @@ def variances(self, flow: bool = False) -> Optional[np.ndarray]:
:param flow: Enable flow bins. Not part of PlottableHistogram, but
included for consistency with other methods and flexibility.
- :return: np.ndarray[np.float64]
+        :return: np.typing.NDArray[np.float64]
"""
view = self.view(flow)
@@ -1053,7 +1105,7 @@ def variances(self, flow: bool = False) -> Optional[np.ndarray]:
else:
return view.variance # type: ignore
- def counts(self, flow: bool = False) -> np.ndarray:
+ def counts(self, flow: bool = False) -> "np.typing.NDArray[Any]":
"""
Returns the number of entries in each bin for an unweighted
histogram or profile and an effective number of entries (defined below)
@@ -1073,7 +1125,7 @@ def counts(self, flow: bool = False) -> np.ndarray:
The larger the spread in weights, the smaller it is, but it is always 0
if filled 0 times, and 1 if filled once, and more than 1 otherwise.
- :return: np.ndarray[np.float64]
+        :return: np.typing.NDArray[np.float64]
"""
view = self.view(flow)
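A small sketch of the pick-plus-slice indexing that the reordered ``__getitem__`` above speeds up (the axes and fills are illustrative):

    import boost_histogram as bh

    h = bh.Histogram(
        bh.axis.IntCategory([1, 2, 3]),
        bh.axis.Regular(10, 0, 1),
    )
    h.fill([1, 2, 2, 3], [0.15, 0.25, 0.85, 0.5])

    # Pick one category and slice the regular axis in a single expression;
    # the pick is now applied before the reduce step.
    sub = h[bh.loc(2), bh.loc(0.2) : bh.loc(0.9)]
    print(sub.values())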
diff --git a/src/boost_histogram/_internal/utils.py b/src/boost_histogram/_internal/utils.py
index 1d9fc1d6..6fe7abe8 100644
--- a/src/boost_histogram/_internal/utils.py
+++ b/src/boost_histogram/_internal/utils.py
@@ -1,5 +1,17 @@
+import itertools
+import sys
import typing
-from typing import Callable, ClassVar, Iterator, Optional, Set, Type, TypeVar
+from typing import (
+ Any,
+ Callable,
+ ClassVar,
+ Iterator,
+ Optional,
+ Set,
+ Tuple,
+ Type,
+ TypeVar,
+)
import boost_histogram
@@ -176,3 +188,16 @@ def _walk_subclasses(cls: Type[object]) -> Iterator[Type[object]]:
# user subclasses to work
yield from _walk_subclasses(base)
yield base
+
+
+def zip_strict(*args: Any) -> Iterator[Tuple[Any, ...]]:
+ if sys.version_info >= (3, 10):
+ yield from zip(*args, strict=True)
+ return
+
+ marker = object()
+ for each in itertools.zip_longest(*args, fillvalue=marker):
+ for val in each:
+ if val is marker:
+ raise ValueError("zip() arguments are not the same length")
+ yield each
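
Illustrative use of the zip_strict helper added above: on Python 3.10+ it
defers to zip(strict=True), otherwise it raises an equivalent ValueError when
the iterables differ in length.

from boost_histogram._internal.utils import zip_strict

print(list(zip_strict("ab", [1, 2])))  # [('a', 1), ('b', 2)]
try:
    list(zip_strict("ab", [1, 2, 3]))
except ValueError as exc:
    print(exc)  # length mismatch is rejected instead of silently truncated
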
diff --git a/src/boost_histogram/_internal/view.py b/src/boost_histogram/_internal/view.py
index 6ebd9c51..b06ab245 100644
--- a/src/boost_histogram/_internal/view.py
+++ b/src/boost_histogram/_internal/view.py
@@ -6,11 +6,11 @@
from .typing import ArrayLike, StrIndex, Ufunc
-class View(np.ndarray):
+class View(np.ndarray): # type: ignore
__slots__ = ()
_FIELDS: ClassVar[Tuple[str, ...]]
- def __getitem__(self, ind: StrIndex) -> np.ndarray:
+ def __getitem__(self, ind: StrIndex) -> "np.typing.NDArray[Any]":
sliced = super().__getitem__(ind)
# If the shape is empty, return the parent type
@@ -24,7 +24,7 @@ def __getitem__(self, ind: StrIndex) -> np.ndarray:
return sliced # type: ignore
def __repr__(self) -> str:
- # Numpy starts the ndarray class name with "array", so we replace it
+ # NumPy starts the ndarray class name with "array", so we replace it
# with our class name
return f"{self.__class__.__name__}(\n " + repr(self.view(np.ndarray))[6:]
@@ -97,13 +97,13 @@ class WeightedSumView(View):
__slots__ = ()
_PARENT = WeightedSum
- value: np.ndarray
- variance: np.ndarray
+ value: "np.typing.NDArray[Any]"
+ variance: "np.typing.NDArray[Any]"
# Could be implemented on master View
def __array_ufunc__(
self, ufunc: Ufunc, method: str, *inputs: Any, **kwargs: Any
- ) -> np.ndarray:
+ ) -> "np.typing.NDArray[Any]":
# This one is defined for record arrays, so just use it
# (Doesn't get picked up the pass-through)
@@ -130,14 +130,14 @@ def __array_ufunc__(
# Addition of two views
if input_0.dtype == input_1.dtype:
- if ufunc in {np.add}:
+ if ufunc in {np.add, np.subtract}:
ufunc(
input_0["value"],
input_1["value"],
out=result["value"],
**kwargs,
)
- ufunc(
+ np.add(
input_0["variance"],
input_1["variance"],
out=result["variance"],
@@ -205,13 +205,13 @@ class WeightedMeanView(View):
__slots__ = ()
_PARENT = WeightedMean
- sum_of_weights: np.ndarray
- sum_of_weights_squared: np.ndarray
- value: np.ndarray
- _sum_of_weighted_deltas_squared: np.ndarray
+ sum_of_weights: "np.typing.NDArray[Any]"
+ sum_of_weights_squared: "np.typing.NDArray[Any]"
+ value: "np.typing.NDArray[Any]"
+ _sum_of_weighted_deltas_squared: "np.typing.NDArray[Any]"
@property
- def variance(self) -> np.ndarray:
+ def variance(self) -> "np.typing.NDArray[Any]":
with np.errstate(divide="ignore", invalid="ignore"):
return self["_sum_of_weighted_deltas_squared"] / ( # type: ignore
self["sum_of_weights"]
@@ -224,20 +224,20 @@ class MeanView(View):
__slots__ = ()
_PARENT = Mean
- count: np.ndarray
- value: np.ndarray
- _sum_of_deltas_squared: np.ndarray
+ count: "np.typing.NDArray[Any]"
+ value: "np.typing.NDArray[Any]"
+ _sum_of_deltas_squared: "np.typing.NDArray[Any]"
# Variance is a computation
@property
- def variance(self) -> np.ndarray:
+ def variance(self) -> "np.typing.NDArray[Any]":
with np.errstate(divide="ignore", invalid="ignore"):
- return self["_sum_of_deltas_squared"] / (self["count"] - 1) # type: ignore
+ return self["_sum_of_deltas_squared"] / (self["count"] - 1)
def _to_view(
- item: np.ndarray, value: bool = False
-) -> Union[np.ndarray, WeightedSumView, WeightedMeanView, MeanView]:
+ item: "np.typing.NDArray[Any]", value: bool = False
+) -> Union["np.typing.NDArray[Any]", WeightedSumView, WeightedMeanView, MeanView]:
for cls in View.__subclasses__():
if cls._FIELDS == item.dtype.names:
ret = item.view(cls)
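
A sketch of the behaviour the WeightedSumView hunk above enables: subtracting
two views subtracts the values but adds the variances, since
Var(A - B) = Var(A) + Var(B) for independent estimates. Illustrative only,
assuming a build that includes this patch:

import boost_histogram as bh

h1 = bh.Histogram(bh.axis.Regular(3, 0, 3), storage=bh.storage.Weight())
h2 = h1.copy()
h1.fill([0.5, 1.5], weight=2.0)
h2.fill([0.5], weight=1.0)

diff = h1.view() - h2.view()
print(diff["value"])     # element-wise difference of the values
print(diff["variance"])  # element-wise sum of the variances
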
diff --git a/src/boost_histogram/numpy.py b/src/boost_histogram/numpy.py
index 2488409c..a0eb8666 100644
--- a/src/boost_histogram/numpy.py
+++ b/src/boost_histogram/numpy.py
@@ -38,7 +38,7 @@ def histogramdd(
if normed is not None:
raise KeyError(
- "normed=True is not recommended for use in Numpy, and is not supported in boost-histogram; use density=True instead"
+ "normed=True is not recommended for use in NumPy, and is not supported in boost-histogram; use density=True instead"
)
if density and histogram is not None:
raise KeyError(
@@ -67,7 +67,7 @@ def histogramdd(
if np.issubdtype(type(b), np.integer):
if r is None:
# Nextafter may affect bin edges slightly
- r = (np.min(a[n]), np.max(a[n]))
+ r = (np.amin(a[n]), np.amax(a[n]))
cpp_ax = _core.axis.regular_numpy(b, r[0], r[1])
new_ax = _cast(None, cpp_ax, _axis.Axis)
axs.append(new_ax)
@@ -183,6 +183,6 @@ def histogram(
lets you set the number of threads to fill with (0 for auto, None for 1).
"""
- f.__doc__ = H.format(n.__name__) + n.__doc__
+ f.__doc__ = H.format(n.__name__) + (n.__doc__ or "")
del f, n, H
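
For reference, a small sketch of the density= path that the updated message
points users to (assumes only that NumPy and boost_histogram.numpy are
importable):

import numpy as np
import boost_histogram.numpy as bhnp

data = np.random.default_rng(0).normal(size=1000)
hist, edges = bhnp.histogram(data, bins=20, density=True)
print(np.sum(hist * np.diff(edges)))  # densities integrate to ~1 over the range
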
diff --git a/src/register_accumulators.cpp b/src/register_accumulators.cpp
index 5e2b51fd..77453d6c 100644
--- a/src/register_accumulators.cpp
+++ b/src/register_accumulators.cpp
@@ -59,7 +59,7 @@ decltype(auto) make_buffer() {
void register_accumulators(py::module& accumulators) {
// Naming convention:
- // If a value is publically available in Boost.Histogram accumulators
+ // If a value is publicly available in Boost.Histogram accumulators
// as a method, it has the same name in the numpy record array.
// If it is not available except through a computation, it has
// the same name as the private property without the trailing _.
diff --git a/src/register_algorithm.cpp b/src/register_algorithm.cpp
index fbf2e6d8..3d1aee03 100644
--- a/src/register_algorithm.cpp
+++ b/src/register_algorithm.cpp
@@ -10,6 +10,7 @@
void register_algorithms(py::module& algorithm) {
py::class_<bh::algorithm::reduce_command>(algorithm, "reduce_command")
.def(py::init())
+ .def_readwrite("iaxis", &bh::algorithm::reduce_command::iaxis)
.def("__repr__", [](const bh::algorithm::reduce_command& self) {
using range_t = bh::algorithm::reduce_command::range_t;
diff --git a/src/register_axis.cpp b/src/register_axis.cpp
index 88b68935..32687087 100644
--- a/src/register_axis.cpp
+++ b/src/register_axis.cpp
@@ -55,7 +55,7 @@ void register_axes(py::module& mod) {
"bins"_a,
"start"_a,
"stop"_a,
- "tranform"_a)
+ "transform"_a)
.def_property_readonly("transform", [](const axis::regular_trans& self) {
return self.transform();
});
diff --git a/tests/conftest.py b/tests/conftest.py
index c43903a7..fe67f385 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,6 +1,6 @@
import pytest
-import boost_histogram # noqa: F401
+import boost_histogram as bh
@pytest.fixture(params=(False, True), ids=("no_growth", "growth"))
@@ -29,3 +29,30 @@ def flow(request):
)
def metadata(request):
return request.param
+
+
+@pytest.fixture(
+ params=(
+ bh.storage.Double,
+ bh.storage.Int64,
+ bh.storage.AtomicInt64,
+ bh.storage.Weight,
+ bh.storage.Unlimited,
+ ),
+ ids=("Double", "Int64", "AtomicInt64", "Weight", "Unlimited"),
+)
+def count_storage(request):
+ return request.param
+
+
+@pytest.fixture(
+ params=(
+ bh.storage.Double,
+ bh.storage.Int64,
+ bh.storage.AtomicInt64,
+ bh.storage.Unlimited,
+ ),
+ ids=("Double", "Int64", "AtomicInt64", "Unlimited"),
+)
+def count_single_storage(request):
+ return request.param
diff --git a/tests/pickles/bh_1.1.0.pkl b/tests/pickles/bh_1.1.0.pkl
new file mode 100644
index 00000000..aaaab253
Binary files /dev/null and b/tests/pickles/bh_1.1.0.pkl differ
diff --git a/tests/pickles/make_pickle.py b/tests/pickles/make_pickle.py
index 49939c8a..97b4ba18 100755
--- a/tests/pickles/make_pickle.py
+++ b/tests/pickles/make_pickle.py
@@ -3,14 +3,19 @@
import pickle
from pathlib import Path
+from typing import Optional
import typer
import boost_histogram as bh
+DIR = Path(__file__).parent.resolve()
+
def make_pickle(
- output: Path = typer.Argument(..., exists=False), *, protocol: int = 2 # noqa: B008
+ output: Optional[Path] = typer.Argument(None, exists=False), # noqa: B008
+ *,
+ protocol: int = 2,
):
"""
Make a pickle file with the current boost-histogram for use in tests.
@@ -18,7 +23,10 @@ def make_pickle(
VER = tuple(map(int, bh.__version__.split(".")))
- h1 = bh.Histogram(bh.axis.Regular(31, -15, 15))
+ if output is None:
+ output = DIR / f"bh_{bh.__version__}.pkl"
+
+ h1 = bh.Histogram(bh.axis.Regular(31, -15, 15), storage=bh.storage.Int64())
h2 = bh.Histogram(
bh.axis.Integer(0, 5, metadata={"hello": "world"}), storage=bh.storage.Weight()
)
diff --git a/tests/test_axis.py b/tests/test_axis.py
index 2b89b0c9..ad38979e 100644
--- a/tests/test_axis.py
+++ b/tests/test_axis.py
@@ -88,7 +88,7 @@ def test_metadata(axis, args, opt, kwargs):
# The point of this ABC is to force all the tests listed here to be implemented
-# for each axis type. PyTest instantiates these test classes for us, so missing
+# for each axis type. Pytest instantiates these test classes for us, so missing
# one really does fail the test.
class Axis(abc.ABC):
@abc.abstractmethod
@@ -197,6 +197,8 @@ def test_equal(self):
assert a != bh.axis.Regular(3, 1.0, 2.0)
assert a != bh.axis.Regular(4, 1.1, 2.0)
assert a != bh.axis.Regular(4, 1.0, 2.1)
+ assert a != object()
+ assert not (a == object()) # __eq__ and __ne__ are separately implemented
# metadata compare
assert bh.axis.Regular(1, 2, 3, metadata=1) == bh.axis.Regular(
@@ -325,12 +327,12 @@ def test_log_transform(self):
assert a.index(-1) == 2
assert a.index(0.99) == -1
- assert a.index(1.0) == 0
+ assert a.index(1.001) == 0
assert a.index(9.99) == 0
- assert a.index(10.0) == 1
+ assert a.index(10.01) == 1
assert a.index(99.9) == 1
- assert a.index(100) == 2
- assert a.index(1000) == 2
+ assert a.index(100.01) == 2
+ assert a.index(1000.1) == 2
assert a.bin(0)[0] == approx(1e0)
assert a.bin(1)[0] == approx(1e1)
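
The probes above are nudged off the exact bin boundaries (1.0 -> 1.001,
10.0 -> 10.01, ...), presumably because floating-point round-off in the log
transform can place a value sitting exactly on an edge in either neighbouring
bin. A quick sketch of such an axis (two log-spaced bins from 1 to 100, as in
the test):

import boost_histogram as bh

a = bh.axis.Regular(2, 1.0, 100.0, transform=bh.axis.transform.log)
print(a.edges)                         # approximately [1., 10., 100.]
print(a.index(1.001), a.index(10.01))  # 0 1
print(a.index(100.01))                 # 2 (overflow index for a 2-bin axis)
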
diff --git a/tests/test_benchmark_category_axis.py b/tests/test_benchmark_category_axis.py
index 6c473fab..d6c3eb46 100644
--- a/tests/test_benchmark_category_axis.py
+++ b/tests/test_benchmark_category_axis.py
@@ -34,3 +34,39 @@ def run(h, data):
h.fill(data)
benchmark(run, h, tuple(values) if dtype is tuple else values.astype(dtype))
+
+
+@pytest.mark.benchmark(group="Pick")
+def test_pick_only(benchmark):
+
+ h = bh.Histogram(
+ bh.axis.StrCategory([str(i) for i in range(32)]),
+ bh.axis.StrCategory([str(i) for i in range(32)]),
+ bh.axis.StrCategory([str(i) for i in range(32)]),
+ bh.axis.Regular(32, 0, 320),
+ )
+
+ h[...] = 1.0
+
+ def run(h):
+ return h[bh.loc("13"), bh.loc("13"), bh.loc("13"), :].view()
+
+ benchmark(run, h)
+
+
+@pytest.mark.benchmark(group="Pick")
+def test_pick_and_slice(benchmark):
+
+ h = bh.Histogram(
+ bh.axis.StrCategory([str(i) for i in range(32)]),
+ bh.axis.StrCategory([str(i) for i in range(32)]),
+ bh.axis.StrCategory([str(i) for i in range(32)]),
+ bh.axis.Regular(32, 0, 320),
+ )
+
+ h[...] = 1.0
+
+ def run(h):
+ return h[3:29, bh.loc("13"), bh.loc("13"), :].view()
+
+ benchmark(run, h)
diff --git a/tests/test_histogram.py b/tests/test_histogram.py
index bcc7ca30..46ac0042 100644
--- a/tests/test_histogram.py
+++ b/tests/test_histogram.py
@@ -1,8 +1,11 @@
import functools
import operator
+import pickle
import sys
+from collections import OrderedDict
from io import BytesIO
+import env
import numpy as np
import pytest
from numpy.testing import assert_array_equal
@@ -10,15 +13,6 @@
import boost_histogram as bh
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
-
-from collections import OrderedDict
-
-import env
-
def test_init():
bh.Histogram()
@@ -152,12 +146,8 @@ def test_fill_1d(flow):
assert get(h, bh.overflow) == 1
-@pytest.mark.parametrize(
- "storage",
- [bh.storage.Int64, bh.storage.Double, bh.storage.Unlimited, bh.storage.AtomicInt64],
-)
-def test_setting(storage):
- h = bh.Histogram(bh.axis.Regular(10, 0, 1), storage=storage())
+def test_setting(count_single_storage):
+ h = bh.Histogram(bh.axis.Regular(10, 0, 1), storage=count_single_storage())
h[bh.underflow] = 1
h[0] = 2
h[1] = 3
@@ -403,6 +393,50 @@ def test_add_2d_w(flow):
assert h[bh.tag.at(i), bh.tag.at(j)] == 2 * m[i][j]
+def test_sub_2d(flow, count_storage):
+
+ h0 = bh.Histogram(
+ bh.axis.Integer(-1, 2, underflow=flow, overflow=flow),
+ bh.axis.Regular(4, -2, 2, underflow=flow, overflow=flow),
+ storage=count_storage(),
+ )
+
+ h0.fill(-1, -2)
+ h0.fill(-1, -1)
+ h0.fill(0, 0)
+ h0.fill(0, 1)
+ h0.fill(1, 0)
+ h0.fill(3, -1)
+ h0.fill(0, -3)
+
+ m = h0.values(flow=True).copy()
+
+ if count_storage not in {bh.storage.AtomicInt64, bh.storage.Weight}:
+ h = h0.copy()
+ h -= h0
+ assert h.values(flow=True) == approx(m * 0)
+
+ h -= h0
+ assert h.values(flow=True) == approx(-m)
+
+ h2 = h0 - (h0 + h0 + h0)
+ assert h2.values(flow=True) == approx(-2 * m)
+
+ h3 = h0 - h0.view(flow=True) * 4
+ assert h3.values(flow=True) == approx(-3 * m)
+
+ h4 = h0.copy()
+ h4 -= h0.view(flow=True) * 5
+ assert h4.values(flow=True) == approx(-4 * m)
+
+ h5 = h0.copy()
+ h5 -= 2
+ assert h5.values(flow=True) == approx(m - 2)
+
+ h6 = h0 - 3
+ assert h6.values(flow=True) == approx(m - 3)
+
+
def test_repr():
hrepr = """Histogram(
Regular(3, 0, 1),
@@ -733,7 +767,7 @@ def test_pickle_bool():
assert_array_equal(a.view(), b.view())
-# Numpy tests
+# NumPy tests
def test_numpy_conversion_0():
@@ -1240,3 +1274,12 @@ def test_sum_empty_axis():
)
assert hist.sum().value == 0
assert "Str" in repr(hist)
+
+
+# Issue 618
+def test_negative_fill(count_storage):
+ h = bh.Histogram(bh.axis.Integer(0, 3), storage=count_storage())
+ h.fill(1, weight=-1)
+
+ answer = np.array([0, -1, 0])
+ assert h.values() == approx(answer)
diff --git a/tests/test_minihist_title.py b/tests/test_minihist_title.py
index d2042745..794b1f3a 100644
--- a/tests/test_minihist_title.py
+++ b/tests/test_minihist_title.py
@@ -41,6 +41,11 @@ def name(self):
"""
return tuple(ax.name for ax in self)
+ @name.setter
+ def name(self, values):
+ for ax, val in zip(self, values):
+ ax._ax.metadata["name"] = f"test: {val}"
+
# When you subclass Histogram or an Axes, you should register your family so
# boost-histogram will know what to convert C++ objects into.
@@ -152,3 +157,22 @@ def test_access():
assert hist_conv.axes["a"] == hist_conv.axes[0]
assert hist_conv.axes["b"] == hist_conv.axes[1]
+
+
+def test_hist_name_set():
+ hist_1 = CustomHist(Regular(10, 0, 1, name="a"), Regular(20, 0, 4, name="b"))
+
+ hist_1.axes.name = ("c", "d")
+ assert hist_1.axes.name == ("test: c", "test: d")
+
+ with pytest.raises(AttributeError):
+ hist_1.axes[0].name = "a"
+
+ hist_1.axes.label = ("one", "two")
+ assert hist_1.axes.label == ("one", "two")
+
+ with pytest.raises(ValueError):
+ hist_1.axes.label = ("one",)
+
+ with pytest.raises(ValueError):
+ hist_1.axes.label = ("one", "two", "three")
diff --git a/tests/test_pickles.py b/tests/test_pickles.py
index ca332740..b902169f 100644
--- a/tests/test_pickles.py
+++ b/tests/test_pickles.py
@@ -8,7 +8,7 @@
DIR = os.path.abspath(os.path.dirname(__file__))
-@pytest.mark.parametrize("version", ["0.10.2", "0.6.2", "0.11.1"])
+@pytest.mark.parametrize("version", ["0.10.2", "0.6.2", "0.11.1", "1.1.0"])
def test_read_pickle(version):
filename = os.path.join(DIR, "pickles", f"bh_{version}.pkl")
@@ -21,7 +21,9 @@ def test_read_pickle(version):
h2 = d["h2"]
h3 = d["h3"]
- assert h1._storage_type == bh.storage.Double
+ assert h1._storage_type == (
+ bh.storage.Double if version[0] == "0" else bh.storage.Int64
+ )
assert h2._storage_type == bh.storage.Weight
assert h3._storage_type == bh.storage.Double
diff --git a/tests/test_views.py b/tests/test_views.py
index c2f5c746..95c6cb95 100644
--- a/tests/test_views.py
+++ b/tests/test_views.py
@@ -65,9 +65,14 @@ def test_view_add(v):
assert_allclose(v2.value, [2, 5, 4, 3])
assert_allclose(v2.variance, [4, 7, 6, 5])
- v += 2
- assert_allclose(v.value, [2, 5, 4, 3])
- assert_allclose(v.variance, [4, 7, 6, 5])
+ v2 = v.copy()
+ v2 += 2
+ assert_allclose(v2.value, [2, 5, 4, 3])
+ assert_allclose(v2.variance, [4, 7, 6, 5])
+
+ v2 = v + v
+ assert_allclose(v2.value, v.value * 2)
+ assert_allclose(v2.variance, v.variance * 2)
def test_view_sub(v):
@@ -83,9 +88,14 @@ def test_view_sub(v):
assert_allclose(v2.value, [1, -2, -1, 0])
assert_allclose(v2.variance, [1, 4, 3, 2])
- v -= 2
- assert_allclose(v.value, [-2, 1, 0, -1])
- assert_allclose(v.variance, [4, 7, 6, 5])
+ v2 = v.copy()
+ v2 -= 2
+ assert_allclose(v2.value, [-2, 1, 0, -1])
+ assert_allclose(v2.variance, [4, 7, 6, 5])
+
+ v2 = v - v
+ assert_allclose(v2.value, [0, 0, 0, 0])
+ assert_allclose(v2.variance, v.variance * 2)
def test_view_unary(v):