diff --git a/.github/workflows/conda-build.yml b/.github/workflows/conda-build.yml new file mode 100644 index 00000000..61a56029 --- /dev/null +++ b/.github/workflows/conda-build.yml @@ -0,0 +1,43 @@ +name: Conda Build + +on: + push: + branches: + - development # You can change this to the branch you want to trigger on + +jobs: + build-linux: + name: CondaBuild (${{ matrix.python-version }}, ${{ matrix.os }}) + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: ["ubuntu-latest", "macos-latest", "windows-latest"] + python-version: ["3.8", "3.11"] + steps: + - name: Checkout Repository + uses: actions/checkout@v2 + + - name: Set up Conda + uses: conda-incubator/setup-miniconda@v3 + with: + auto-update-conda: true + python-version: ${{ matrix.python-version }} + mamba-version: "*" + channels: conda-forge,defaults + channel-priority: true + + - name: Display Conda Settings + shell: bash -el {0} + run: | + conda info + conda list + conda config --show-sources + conda config --show + printenv | sort + + - name: Build Conda Package + shell: bash -el {0} + run: | + mamba install conda-build boa + conda mambabuild conda-recipe diff --git a/.gitignore b/.gitignore index a86cab20..102b6fb5 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ dist/ doc/auto_examples tmp/ .vscode +.cache .vs .ipynb_checkpoints cmake-build-*/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a90a56f0..cb0646bf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,23 +2,10 @@ variables: GIT_SSL_NO_VERIFY: "1" stages: -# - sync - build - test - deploy -# github:push: -# stage: sync -# image: condaforge/mambaforge -# tags: -# - linux -# before_script: -# - git submodule update --init --recursive --remote -# script: -# - git switch -c ${CI_COMMIT_REF_NAME} -# - git push --force https://${GITHUB_TOKEN}@github.com/fluorescence-tools/imp.bff - - # general definitions .build: &build stage: build @@ -47,21 +34,21 @@ conda:build:lnx:x86: - linux - x86_64 conda:build:lnx:ppc64le: - <<: *conda_build_lnx - image: - entrypoint: [ "/bin/bash", "-i", "-c" ] - name: condaforge/linux-anvil-ppc64le - tags: - - linux - - ppc64le + <<: *conda_build_lnx + image: + entrypoint: [ "/bin/bash", "-i", "-c" ] + name: condaforge/linux-anvil-ppc64le + tags: + - linux + - ppc64le conda:build:lnx:aarch64: - <<: *conda_build_lnx - image: - entrypoint: [ "/bin/bash", "-i", "-c" ] - name: condaforge/linux-anvil-aarch64 - tags: - - linux - - aarch64 + <<: *conda_build_lnx + image: + entrypoint: [ "/bin/bash", "-i", "-c" ] + name: condaforge/linux-anvil-aarch64 + tags: + - linux + - aarch64 conda:build:osx: <<: *conda_build_posix diff --git a/README.md b/README.md index 278e972c..a266adef 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,12 @@ # tttrlib [![Anaconda-Server Badge](https://anaconda.org/tpeulen/tttrlib/badges/version.svg)](https://anaconda.org/tpeulen/tttrlib) [![PyPI version](https://badge.fury.io/py/tttrlib.svg)](https://pypi.org/project/tttrlib/) -[![pipeline status](https://gitlab.peulen.xyz/tpeulen/tttrlib/badges/master/pipeline.svg)](https://gitlab.peulen.xyz/tpeulen/tttrlib/badges/master/pipeline.svg) +![conda build](https://github.com/fluorescence-tools/tttrlib/actions/workflows/conda-build.yml/badge.svg) + ## General description tttrlib is a file format agnostic high performance library to -read and process time-tagged-time resolved (TTTR) data acquired by +read, process, and write time-tagged-time resolved (TTTR) data acquired by PicoQuant (PQ) and Becker & Hickl measurement devices/cards or TTTR files in 
the open Photon-HDF format. @@ -25,10 +26,90 @@ Scilab and R. Currently, tttrlib is wrapped for the use in Python. ![tttrlib FLIM][3] -tttrlib is a library that facilitates the interaction with TTTR data that can be -used to develop data analysis pipelines e.g. for single-molecule and image -spectroscopy. tttrlib is not intended as end-user software for specific application -purposes. +`tttrlib` is programmed in C++ and wrapped for Python. Thus, it can be used to integrate time-resolved data into +advanced data analysis pipelines. + +### Capabilities + +* Fast reading of TTTR files (IO limited) +* Generation / analysis of fluorescence decays +* Time window analysis +* Correlation of time event traces +* Filtering of time event traces to generate instrument response functions for fluorescence decay analysis without the need for independent measurements. +* Fast photon distribution analysis +* Fast selection of photons from a photon stream + +For the generation of fluorescence decay histograms, tttrlib outperforms pure numpy- and Python-based +libraries by a factor of ~40. + +## Documentation + +### Installation +In an [anaconda](https://www.anaconda.com/) environment the library can +be installed with the following command: + +```console +conda install -c tpeulen tttrlib +``` + +Alternatively, you can use pip to install `tttrlib`: + +```console +pip install tttrlib +``` + +### Usage +The API of tttrlib as well as some use cases are documented on its [web page](https://docs.peulen.xyz/tttrlib). +Below you find a small selection of code snippets. + +Access photon data as follows: +```python +import tttrlib +fn = 'photon_stream.ptu' +data = tttrlib.TTTR(fn) + +macro_times = data.macro_times +micro_times = data.micro_times +routing_channels = data.routing_channels +``` + +Print header information: +```python +import tttrlib +fn = 'photon_stream.ptu' +data = tttrlib.TTTR(fn) +print(data.json) +``` + +Correlate photon streams: +```python +import tttrlib +fn = 'photon_stream.ptu' +data = tttrlib.TTTR(fn) +correlator = tttrlib.Correlator( + channels=([1], [2]), + tttr=data +) +taus = correlator.x_axis +correlation_amplitude = correlator.correlation +``` + +Create intensity images from CLSM data: +```python +import tttrlib +fn = 'image.ptu' +data = tttrlib.TTTR(fn) +clsm = tttrlib.CLSMImage(data) + +channels = [0, 1] +prompt_range = [0, 16000] +clsm.fill(channels=channels, micro_time_ranges=[prompt_range]) + +intensity_image = clsm.intensity +``` + +tttrlib is in active development. In case you notice unusual behaviour, do not +hesitate to contact the authors. ## Supported file formats @@ -44,27 +125,9 @@ purposes. ### Photon HDF5 ## Design goals -* Low memory footprint (keep objective large datasets, e.g. FLIM in memory). +* Low memory footprint (keep large datasets, e.g., FLIM, in memory). * Platform independent C/C++ library with interfaces for scripting libraries -## Capabilities - -* Fast (IO limited) Reading TTTR files -* Generation / analysis of fluorescence decays -* Time window analysis -* Correlation of time event traces -* Filtering of time event traces to generate instrument response functions for fluorescence decays analysis without the need of independent measurements.. -* Fast photon distribution analysis -* Fast selection of photons from a photon stream - -Generation of fluorescence decay histograms tttrlib outperforms pure numpy and Python based -libraries by a factor of ~40.
- -## Implementation - -Pure pure C/C++ high performance algorithms for real-time and interactive -analysis of TTTR data. - ## Building and Installation ### C++ shared library @@ -78,7 +141,7 @@ cmake .. sudo make install ``` -On Linux you can build and install a package instead (prefered): +On Linux you can build and install a package instead: ### Python bindings @@ -100,40 +163,18 @@ be installed by the following command: conda install -c tpeulen tttrlib ``` -For most users the later approach is recommended. Currently, pre-compiled +For most users, the latter approach is recommended. Currently, pre-compiled packages for the anaconda distribution system are available for Windows (x86), Linux (x86, ARM64, PPCle), and macOS (x86). Precompiled libary are linked against conda-forge HDF5 & Boost. Thus, the use of [miniforge](https://github.com/conda-forge/miniforge) -is highly recommended. +is recommended. Legacy 32-bit platforms and versions of programming languages, e.g., Python 2.7 are not supported. -### Pip install - -Ubuntu: - -Self compiled - -```console -sudo apt-get install libhdf5-dev boost-dev swig -``` - -```console -pip install https://github.com/fluorescence-tools/tttrlib -``` - -## Documentation - -The API of tttrlib as well as some use cases are documented -on its [web page](https://docs.peulen.xyz/tttrlib) - -Note, tttrlib is highly experimental library in current development. In -case you notice unusual behaviour do not hesitate to contact the authors. - ## License -Copyright 2007-2023 tttrlib developers. +Copyright 2007-2024 tttrlib developers. Licensed under the BSD-3-Clause -[3]: https://docs.peulen.xyz/tttrlib/_images/sphx_glr_plot_read_clsm_data_002.png "tttrlib FLIM" +[3]: https://github.com/fluorescence-tools/tttrlib/doc/logos/mashup.png "tttrlib FLIM" diff --git a/conda-recipe/bld.bat b/conda-recipe/bld.bat index 2abc2a7c..c214bcb6 100755 --- a/conda-recipe/bld.bat +++ b/conda-recipe/bld.bat @@ -1,5 +1,4 @@ cd %SRC_DIR% -git submodule update --recursive --init --remote echo "Build app wrapper" :: build app wrapper @@ -10,8 +9,14 @@ if errorlevel 1 exit 1 rmdir b2 /s /q mkdir b2 cd b2 -for /f %%A in ('python -c "import platform; print(platform.python_version())"') do set python_version=%%A -echo Python version: %python_version% + +REM Call Python with the --version flag to get the version information +for /f "tokens=2 delims= " %%v in ('%PYTHON% --version 2^>^&1') do set PYTHON_VERSION=%%v + +REM Extract only the numeric part of the version +for /f "tokens=1-3 delims=." %%a in ("%PYTHON_VERSION%") do set PYTHON_VERSION_NUMERIC=%%a.%%b.%%c + +echo Python version: %PYTHON_VERSION_NUMERIC% cmake .. -G "NMake Makefiles" ^ -DCMAKE_INSTALL_PREFIX="%LIBRARY_PREFIX%" ^ -DCMAKE_PREFIX_PATH="%PREFIX%" ^ @@ -19,11 +24,10 @@ cmake .. 
-G "NMake Makefiles" ^ -DCMAKE_BUILD_TYPE=Release ^ -DCMAKE_LIBRARY_OUTPUT_DIRECTORY="%SP_DIR%" ^ -DCMAKE_SWIG_OUTDIR="%SP_DIR%" ^ - -DPYTHON_VERSION="%python_version%" ^ + -DPYTHON_VERSION="%PYTHON_VERSION_NUMERIC%" ^ -DBUILD_LIBRARY=ON nmake install - :: Add wrappers to path for each Python command line tool :: (all files without an extension) cd %SRC_DIR%\bin diff --git a/conda-recipe/build.sh b/conda-recipe/build.sh index b8dd5819..d0d2e258 100755 --- a/conda-recipe/build.sh +++ b/conda-recipe/build.sh @@ -1,4 +1,3 @@ -git submodule update --recursive --init --remote mkdir b2 && cd b2 if [[ "${target_platform}" == osx-* ]]; then diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index b63187ee..bf6cdd00 100755 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -13,24 +13,23 @@ build: number: 1 detect_binary_files_with_prefix: True # [not win] +requirements: requirements: build: - - python - - numpy - - doxygen - {{ compiler('c') }} - {{ compiler('cxx') }} - llvm-openmp # [osx] - libgomp # [linux] - pkg-config # [not win] - - swig - - ninja - cmake + - ninja - make # [linux] - - typing # [py27] + - doxygen + - swig + - python + - numpy host: - - boost-cpp 1.78 # [py > 27] - - boost-cpp 1.69 # [py27] + - libboost-devel - hdf5 - python - setuptools @@ -38,16 +37,13 @@ requirements: - numpy run: - python - - tqdm # [py > 27] - - tqdm <=4.14 # [py27] + - tqdm - click - click-didyoumean - {{ pin_compatible('numpy') }} - scikit-image - - matplotlib + - matplotlib-base - hdf5 - - typing # [py27] - - importlib_metadata # [py27] test: imports: @@ -64,3 +60,4 @@ about: extra: recipe-maintainers: - tpeulen + - khemmen diff --git a/doc/logos/mashup.png b/doc/logos/mashup.png new file mode 100644 index 00000000..6454284d Binary files /dev/null and b/doc/logos/mashup.png differ diff --git a/doc/logos/mashup.pptx b/doc/logos/mashup.pptx new file mode 100644 index 00000000..e5abce36 Binary files /dev/null and b/doc/logos/mashup.pptx differ diff --git a/examples/correlation/plot_filtered_correlation.py b/examples/correlation/not_ready/plot_filtered_correlation.py similarity index 100% rename from examples/correlation/plot_filtered_correlation.py rename to examples/correlation/not_ready/plot_filtered_correlation.py diff --git a/examples/correlation/plot_confocor3_two_ch_correlation.py b/examples/correlation/plot_confocor3_two_ch_correlation.py new file mode 100644 index 00000000..7a3d206a --- /dev/null +++ b/examples/correlation/plot_confocor3_two_ch_correlation.py @@ -0,0 +1,121 @@ +""" +======================================= +Confocor3 two channel cross-correlation +======================================= + +The raw FCS data format of the Zeiss Confocor3 is relatively simple. +Zeiss Confocor3 raw files store time-difference between photons. +A relatively small header is followed by a set of 32-bit integers that +contain the time difference to the previous registered photon. Photons +registered by different channels are stored in separate files. + +In this example two raw files of a Confocor3 are read and merged into +a single photon stream. Next, the merged photon stream is used to compute +the cross-correlation between the two channels. + +""" +import pathlib +import numpy as np + +import pylab as plt +import tttrlib + +#%% +# Reading data +# ------------ +# The photon data registered by different detectors are saved in separate files. +# Read the data of all channels that should be correlated into separate containers. 
+fns = [str(p) for p in pathlib.Path('../../tttr-data/cz/fcs').glob('5a6ce6a348a08e3da9f7c0ab4ee0ce94_R1_P1_K1_Ch*.raw')] +tttr_data = [tttrlib.TTTR(fn, 'CZ-RAW') for fn in fns] + +#%% +# We combine the data in different files into a single TTTR container using the header +# of first file as template. +header = tttr_data[0].header +channels = [t.routing_channels[0] for t in tttr_data] +print("Used channels:", channels) + +#%% +# You can check the count rates of the channels using the macro time resolution contained +# in the header +macro_time_resolution = header.macro_time_resolution +count_rates = [len(t) / (t.macro_times[-1] * macro_time_resolution) for t in tttr_data] +print("Count rates:", count_rates) + +#%% +# Now we merge the data of the two detectors in a single container. The +# marco times need to be sorted first. +macro_times = np.concatenate([t.macro_times for t in tttr_data]) +routing_channels = np.concatenate([t.routing_channels for t in tttr_data]) +sorted_indices = np.argsort(macro_times) + +#%% +# Using the sorted macro times we sort the routing channel numbers +# and the macro times. +routing_channels = routing_channels[sorted_indices] +macro_times = macro_times[sorted_indices] + +#%% +# Note: no micro time and no event type in the raw Confocor3 format. +# Thus, we use ones for the micro time and the event type. +micro_times = np.ones_like(macro_times, dtype=np.uint16) +event_types = np.ones_like(macro_times, dtype=np.int8) + +#%% +# Using the merged marcro times and channel numbers, we create a new +# TTTR container. +tttr_merged = tttrlib.TTTR() +tttr_merged.set_header(header) +tttr_merged.append_events( + macro_times=macro_times, + micro_times=micro_times, + routing_channels=routing_channels, + event_types=event_types +) + +#%% +# The container can be used for standard analysis, e.g., correlations. +settings = { + "n_bins": 9, # n_bins and n_casc defines the settings of the multi-tau + "n_casc": 19, # correlation algorithm +} + +# Create correlator +# Caution: x-axis in units of macro time counter +# tttrlib.Correlator is unaware of the calibration in the TTTR object +correlator = tttrlib.Correlator( + channels=([1], [2]), + tttr=tttr_merged, + **settings +) +plt.semilogx( + correlator.x_axis, + correlator.correlation, + label="Corr(1,2)" +) + +correlator = tttrlib.Correlator( + channels=([1], [1]), + tttr=tttr_merged, + **settings +) +plt.semilogx( + correlator.x_axis, + correlator.correlation, + label="Corr(1,1)" +) + +correlator = tttrlib.Correlator( + channels=([2], [2]), + tttr=tttr_merged, + **settings +) +plt.semilogx( + correlator.x_axis, + correlator.correlation, + label="Corr(2,2)" +) + +plt.ylim(1.032, 1.050) +plt.show() + diff --git a/examples/correlation/plot_correlation_cr_filter.py b/examples/correlation/plot_correlation_cr_filter.py old mode 100755 new mode 100644 index 19e259bb..f9be358b --- a/examples/correlation/plot_correlation_cr_filter.py +++ b/examples/correlation/plot_correlation_cr_filter.py @@ -42,7 +42,6 @@ # ``n_bins`` and ``n_casc`` define the settings of the multi-tau correlation steps. # If ``make_fine`` is set to false the micro time is not used for correlation. 
settings = { - "method": "default", "n_bins": 3, "n_casc": 27, "make_fine": False diff --git a/examples/correlation/plot_full_correlation.py b/examples/correlation/plot_full_correlation.py old mode 100755 new mode 100644 index 69c12028..23bebd95 --- a/examples/correlation/plot_full_correlation.py +++ b/examples/correlation/plot_full_correlation.py @@ -58,7 +58,6 @@ # times as time information, the default value of parameters ``make_fine`` # when creating a new Correlator needs to by modified: full_corr_settings = { - "method": 'default', "n_casc": 37, # n_bins and n_casc defines the settings of the multi-tau "n_bins": 7, # correlation algorithm "make_fine": True # Use the microtime information (also called "fine" correlation) @@ -103,5 +102,6 @@ ax.set_xlabel('corr. time (ms)') ax.set_ylabel('Correlation Amplitude') ax.legend() +ax.set_ylim(0, 12) # Set y-axis range to 0-12 plt.show() diff --git a/examples/correlation/plot_gated_correlation.py b/examples/correlation/plot_gated_correlation.py old mode 100755 new mode 100644 diff --git a/examples/correlation/plot_normal_correlation.py b/examples/correlation/plot_normal_correlation.py old mode 100755 new mode 100644 index 09b6d50c..41c095df --- a/examples/correlation/plot_normal_correlation.py +++ b/examples/correlation/plot_normal_correlation.py @@ -39,7 +39,6 @@ # Correlator settings, if the same settings are used repeatedly it is useful to define them once settings = { - "method": "default", "n_bins": 7, # n_bins and n_casc defines the settings of the multi-tau "n_casc": 27, # correlation algorithm "make_fine": False # Do not use the microtime information @@ -64,8 +63,8 @@ correlator.set_events(t1, w1, t2, w2) # scale the x-axis to have units in milliseconds (common unit in FCS) -x = correlator.curve.x * (data.header.macro_time_resolution) -y = correlator.curve.y +x = correlator.x * data.header.macro_time_resolution +y = correlator.y plt.semilogx(x, y, label="Gp/Gs") @@ -82,8 +81,8 @@ # no need to scale axis - correlator aware of macro time units plt.semilogx( - correlator.curve.x, - correlator.curve.y, + correlator.x, + correlator.y, label="Gp,Gs/Rp,Rs" ) @@ -95,8 +94,8 @@ ) plt.semilogx( - correlator.x_axis, - correlator.correlation, + correlator.x, + correlator.y, label="pR,sR" ) @@ -108,8 +107,8 @@ ) plt.semilogx( - correlator.x_axis, - correlator.correlation, + correlator.x, + correlator.y, label="pRsR,pGsG" ) diff --git a/examples/correlation/plot_sliced_correlation.py b/examples/correlation/plot_sliced_correlation.py old mode 100755 new mode 100644 index 96c8e7be..c9a269d1 --- a/examples/correlation/plot_sliced_correlation.py +++ b/examples/correlation/plot_sliced_correlation.py @@ -44,7 +44,7 @@ #%% # Next, we plan to split the TTTR data into separate data chunks. Here, chunk # the data into at least 5 seconds long pieces. The method ``get_ranges_by_time_window`` -# returns an one-dimensional array with the beginning and the end index of each +# returns a one-dimensional array with the beginning and the end index of each # chunk. Later, we will use these start/stop indices to define TTTR slices # that will be correlated. To slice the data into time windows we use the # macro time calibration we computed previously. 
Note, the last chunk can diff --git a/examples/flim/plot_mle_lifetime.py b/examples/flim/plot_mle_lifetime.py index 18dc8cb9..a9ed287f 100644 --- a/examples/flim/plot_mle_lifetime.py +++ b/examples/flim/plot_mle_lifetime.py @@ -19,14 +19,14 @@ ch_p = [0] ch_s = [1] binning_factor = 64 -minimum_n_photons = 10 +minimum_n_photons = 30 fn_clsm = '../../tttr-data/imaging/pq/ht3/crn_clv_img.ht3' data = tttrlib.TTTR(fn_clsm) # %% # Next we create two CLSM container for the parallel and perpendicular -# channel and stack frames to have more photons in each pixels. +# channel and stack frames to have more photons in each pixel. clsm_p = tttrlib.CLSMImage(data, channels=ch_p, fill=True) clsm_s = tttrlib.CLSMImage(data, channels=ch_s, fill=True) clsm_p.stack_frames() @@ -69,13 +69,17 @@ # %% # We iterate over all pixels in the image and apply the fit to -# pixels where we have at certain minimum number of photons +# pixels where we have at certain minimum number of photons. intensity = clsm_p.intensity micro_times = data.micro_times // binning_factor n_channels = data.header.number_of_micro_time_channels // binning_factor tau = np.zeros_like(intensity, dtype=np.float32) rho = np.zeros_like(intensity, dtype=np.float32) n_frames, n_lines, n_pixel = clsm_p.shape + +# These loops are not very fast but get the job done +import time +time_start = time.time() for i in range(n_frames): for j in range(0, n_lines, 1): for k in range(n_pixel): @@ -93,10 +97,12 @@ ) r = fit23(hist, x0, fixed) tau[i, j, k] = r['x'][0] +time_stop = time.time() +print("Elapsed time:", time_stop - time_start) - -plt.imshow(tau[0]) +plt.imshow(tau[0], vmin=0, vmax=5) plt.show() plt.hist(tau[0].flatten(), 131, range=(0.01, 5)) plt.show() + diff --git a/ext/CMakeLists.txt b/ext/CMakeLists.txt index f17a1543..f588c563 100644 --- a/ext/CMakeLists.txt +++ b/ext/CMakeLists.txt @@ -22,7 +22,8 @@ IF(BUILD_PYTHON_INTERFACE) WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) ENDIF() - MESSAGE(STATUS "Building Python interface") + MESSAGE(STATUS "Building Python interface") + MESSAGE(STATUS "Python version: '${PYTHON_VERSION}'") cmake_policy(SET CMP0094 NEW) FIND_PACKAGE(Python ${PYTHON_VERSION} EXACT COMPONENTS Interpreter Development.Module NumPy) @@ -49,10 +50,10 @@ IF(BUILD_PYTHON_INTERFACE) MESSAGE(STATUS "SWIG_SUPPORT_FILES_DIRECTORY: '${SWIG_SUPPORT_FILES_DIRECTORY}'") MESSAGE(STATUS "MY_INSTALL_PREFIX: '${MY_INSTALL_PREFIX}'") SWIG_ADD_LIBRARY( - ${MODULE_NAME} - TYPE MODULE - LANGUAGE python - SOURCES python/tttrlib.i ${SRC_files} + ${MODULE_NAME} + TYPE MODULE + LANGUAGE python + SOURCES python/tttrlib.i ${SRC_files} ) TARGET_LINK_LIBRARIES( ${MODULE_NAME} @@ -70,20 +71,20 @@ IF(BUILD_PYTHON_INTERFACE) # ======= # Get the autogenerated Python file get_property(WRAPPER_PY_FILE - TARGET ${MODULE_NAME} - PROPERTY SWIG_SUPPORT_FILES) - + TARGET ${MODULE_NAME} + PROPERTY SWIG_SUPPORT_FILES) + # Install the autogenerated Python file INSTALL(FILES ${WRAPPER_PY_FILE} - DESTINATION ${MY_INSTALL_PREFIX} - COMPONENT bindings) - + DESTINATION ${MY_INSTALL_PREFIX} + COMPONENT bindings) + # Install the SWIG library INSTALL(TARGETS ${MODULE_NAME} - COMPONENT bindings - LIBRARY DESTINATION ${MY_INSTALL_PREFIX} - ARCHIVE DESTINATION ${MY_INSTALL_PREFIX} - RUNTIME DESTINATION ${MY_INSTALL_PREFIX} + COMPONENT bindings + LIBRARY DESTINATION ${MY_INSTALL_PREFIX} + ARCHIVE DESTINATION ${MY_INSTALL_PREFIX} + RUNTIME DESTINATION ${MY_INSTALL_PREFIX} ) ENDIF(BUILD_PYTHON_INTERFACE) @@ -109,5 +110,3 @@ IF(BUILD_R_INTERFACE) ) ENDIF() - - diff --git 
a/ext/python/Correlator.py b/ext/python/Correlator.py index 1f22f9cf..a97a313c 100644 --- a/ext/python/Correlator.py +++ b/ext/python/Correlator.py @@ -18,10 +18,18 @@ def __getattr__(self, item): def correlation(self): return self.get_corr_normalized() +@property +def y(self): + return self.correlation + @property def x_axis(self): return self.get_x_axis() +@property +def x(self): + return self.x_axis + @property def tttr(self): return self.get_tttr() diff --git a/include/CLSMImage.h b/include/CLSMImage.h index 2b26b403..8bf2e407 100644 --- a/include/CLSMImage.h +++ b/include/CLSMImage.h @@ -898,7 +898,7 @@ class CLSMImage { std::string subtract_average = "", double background = 0.0, bool clip=false, double clip_max=1e6, double clip_min=-1e6, - double *images = nullptr, int input_frames=-1, int input_lines=-1, int input_pixel=1, + double *images = nullptr, int n_frames=-1, int n_lines=-1, int n_pixels=1, uint8_t *mask = nullptr, int dmask1 = -1, int dmask2 = -1, int dmask3 = -1, std::vector selected_frames = std::vector() ); diff --git a/include/CorrelatorCurve.h b/include/CorrelatorCurve.h index 67cde8f5..0ea7c578 100644 --- a/include/CorrelatorCurve.h +++ b/include/CorrelatorCurve.h @@ -5,6 +5,8 @@ #include #include /* std::max */ #include /* pow */ +#include // include this header for uint64_t + struct CorrelationCurveSettings{ diff --git a/include/TTTR.h b/include/TTTR.h index a7b8f711..4ca4fdd3 100644 --- a/include/TTTR.h +++ b/include/TTTR.h @@ -130,23 +130,6 @@ void compute_intensity_trace( ); - -// Seems unused -// * Get the ranges in for a specific channel number -// * -// * @param[out] ranges -// * @param[out] n_range -// * @param[in] channel -// * @param[in] n_channel -// * @param[in] channel -// */ -//void get_ranges_channel( -// unsigned int **ranges, int *n_range, -// short *channel, int n_channel, -// int selection_channel -//); -// - /*! * \brief Extracts a subarray of valid events from the input array. * @@ -247,7 +230,8 @@ class TTTR : public std::enable_shared_from_this{ {PQ_RECORD_TYPE_PHT3, ProcessPHT3}, {BH_RECORD_TYPE_SPC600_256, ProcessSPC600_256}, {BH_RECORD_TYPE_SPC600_4096, ProcessSPC600_4096}, - {BH_RECORD_TYPE_SPC130, ProcessSPC130} + {BH_RECORD_TYPE_SPC130, ProcessSPC130}, + {CZ_RECORD_TYPE_CONFOCOR3, ProcessCzRaw} }; /*! @@ -261,6 +245,7 @@ class TTTR : public std::enable_shared_from_this{ * * BH_SPC600_256_CONTAINER 3 * * BH_SPC600_4096_CONTAINER 4 * * PHOTON_HDF5_CONTAINER 5 + * * CZ_CONFOCOR3_CONTAINER 6 * * The numbers correspond to the numbers that should be used when * initializing the class. @@ -463,7 +448,6 @@ class TTTR : public std::enable_shared_from_this{ size_t bytes_per_record ); - /*! * \brief Appends events to the TTTR object. * @@ -684,6 +668,7 @@ class TTTR : public std::enable_shared_from_this{ * - 3: Becker & Hickl SPC-600 with 256 channels Container (BH_SPC600_256_CONTAINER) * - 4: Becker & Hickl SPC-600 with 4096 channels Container (BH_SPC600_4096_CONTAINER) * - 5: Photon-HDF5 Container (PHOTON_HDF5_CONTAINER) + * - 6: Carl Zeiss ConfoCor3 (CZ_CONFOCOR3_CONTAINER) * @param read_input If true, reads the content of the file. 
*/ TTTR(const char *filename, int container_type, bool read_input); @@ -699,6 +684,7 @@ class TTTR : public std::enable_shared_from_this{ * - 3: Becker & Hickl SPC-600 with 256 channels Container (BH_SPC600_256_CONTAINER) * - 4: Becker & Hickl SPC-600 with 4096 channels Container (BH_SPC600_4096_CONTAINER) * - 5: Photon-HDF5 Container (PHOTON_HDF5_CONTAINER) + * - 6: Carl Zeiss ConfoCor3 (CZ_CONFOCOR3_CONTAINER) */ TTTR(const char *filename, int container_type); @@ -713,6 +699,7 @@ class TTTR : public std::enable_shared_from_this{ * - "SPC-600_256": Becker & Hickl SPC-600 with 256 channels Container * - "SPC-600_4096": Becker & Hickl SPC-600 with 4096 channels Container * - "PHOTON-HDF5": Photon-HDF5 Container + * - "CZ_CONFOCOR3_CONTAINER": Carl Zeiss ConfoCor3 Container */ TTTR(const char *filename, const char* container_type); @@ -1075,7 +1062,6 @@ class TTTR : public std::enable_shared_from_this{ double background_fraction = -1.0 ); - /*! * @brief Computes the mean lifetime by moments of decay and instrument response. * diff --git a/include/TTTRHeader.h b/include/TTTRHeader.h index e7b7fa5c..c2b7f9e6 100644 --- a/include/TTTRHeader.h +++ b/include/TTTRHeader.h @@ -46,6 +46,7 @@ class TTTRHeader { friend class TTTR; protected: + // JSON object used to store all the header information nlohmann::json json_data; @@ -241,15 +242,14 @@ class TTTRHeader { TTTRHeader(std::string fn, int tttr_container_type=0); ~TTTRHeader() = default; - /*! Reads the header of a ptu file and sets the reading routing for + /*! + * @brief Reads the header of a PTU file and sets the reading routing. * - * @param fpin - * @param rewind - * @param tttr_record_type - * @param json_data - * @param macro_time_resolution - * @param micro_time_resolution - * @return The position of the file pointer at the end of the header + * @param fpin File pointer to the PTU file. + * @param tttr_record_type Output parameter for the TTTR record type. + * @param json_data Output parameter for JSON data. + * @param rewind Flag to indicate whether to rewind the file (default is true). + * @return The position of the file pointer at the end of the header. */ static size_t read_ptu_header( std::FILE *fpin, @@ -258,26 +258,27 @@ class TTTRHeader { bool rewind = true ); - /*! Reads the header of a ht3 file and sets the reading routing for + /*! + * @brief Reads the header of an HT3 file and sets the reading routing. * - * @param fpin - * @param rewind - * @param tttr_record_type - * @param data - * @return The position of the file pointer at the end of the header + * @param fpin File pointer to the HT3 file. + * @param data Output parameter for JSON data. + * @param rewind Flag to indicate whether to rewind the file (default is true). + * @return The position of the file pointer at the end of the header. */ static size_t read_ht3_header( std::FILE *fpin, nlohmann::json &data, - bool rewind=true + bool rewind = true ); - /*! Reads the header of a Becker&Hickel SPC132 file and sets the reading routing + /*! + * @brief Reads the header of a Becker & Hickel SPC132 file and sets the reading routing. * - * @param fpin - * @param rewind - * @param tttr_record_type - * @param data JSON dictionary that will contain the header information + * @param fpin File pointer to the SPC132 file. + * @param data Output parameter for JSON data. + * @param rewind Flag to indicate whether to rewind the file (default is true). + * @return The position of the file pointer at the end of the header. 
*/ static size_t read_bh132_header( std::FILE *fpin, @@ -285,6 +286,20 @@ class TTTRHeader { bool rewind = true ); + /*! + * @brief Reads the header of a Carl Zeiss (CZ) Confocor3 file and sets the reading routing. + * + * @param fpin File pointer to the Confocor3 file. + * @param data Output parameter for JSON data. + * @param rewind Flag to indicate whether to rewind the file (default is true). + * @return The position of the file pointer at the end of the header. + */ + static size_t read_cz_confocor3_header( + std::FILE *fpin, + nlohmann::json &data, + bool rewind = true + ); + /*! * Write a spc132 header to a file * diff --git a/include/TTTRHeaderTypes.h b/include/TTTRHeaderTypes.h index 1fc2489b..c4dbd24f 100644 --- a/include/TTTRHeaderTypes.h +++ b/include/TTTRHeaderTypes.h @@ -37,6 +37,7 @@ #define BH_SPC600_256_CONTAINER 3 #define BH_SPC600_4096_CONTAINER 4 #define PHOTON_HDF_CONTAINER 5 +#define CZ_CONFOCOR3_CONTAINER 6 // tttrlib record type identifier definitions #define PQ_RECORD_TYPE_HHT2v2 1 @@ -48,6 +49,7 @@ #define BH_RECORD_TYPE_SPC130 7 #define BH_RECORD_TYPE_SPC600_256 8 #define BH_RECORD_TYPE_SPC600_4096 9 +#define CZ_RECORD_TYPE_CONFOCOR3 10 /* @@ -59,7 +61,6 @@ typedef struct { int32_t Show; } CurveMapping_t; - typedef struct { float Start; float Step; @@ -71,7 +72,6 @@ typedef struct{ int32_t VersionCode; } pq_ht3_board_settings_t; - /// The following represents the readable ASCII file header portion in a HT3 file typedef struct { char Ident[16]; //"PicoHarp 300" @@ -139,6 +139,19 @@ typedef struct { } pq_ht3_TTModeHeader_t; +/// Carl Zeiss Confocor3 raw data +typedef union cz_confocor3_settings{ + uint32_t allbits; + struct{ + char Ident[52]; + char dummy1[11]; + unsigned channel :8; + unsigned TagHead_Idx :32; + unsigned sync_rate :32; + } bits; +} cz_confocor3_settings_t; + + /// Becker&Hickl SPC132 Header typedef union bh_spc132_header{ uint32_t allbits; diff --git a/include/TTTRRecordReader.h b/include/TTTRRecordReader.h index 027a18a4..dc248786 100644 --- a/include/TTTRRecordReader.h +++ b/include/TTTRRecordReader.h @@ -6,12 +6,15 @@ #include "TTTRRecordTypes.h" -/*********************************************/ -/*** ***/ -/*** HT3 HHv1.0 ***/ -/*** https://github.com/tsbischof/libpicoquant/blob/master/src/hydraharp/hh_v20.h ***/ -/*********************************************/ +bool ProcessCzRaw( + uint32_t &TTTRRecord, + uint64_t &overflow_counter, + uint64_t &true_nsync, + uint32_t µ_time, + int16_t &channel, + int16_t &record_type +); bool ProcessSPC130( uint32_t &TTTRRecord, @@ -40,6 +43,8 @@ bool ProcessSPC600_256( int16_t &record_type ); +/* See: https://github.com/tsbischof/libpicoquant/blob/master/src/hydraharp/hh_v20.h ***/ + bool ProcessHHT2v2( uint32_t &TTTRRecord, uint64_t &overflow_counter, diff --git a/include/TTTRRecordTypes.h b/include/TTTRRecordTypes.h index 13b6329b..d4fc3937 100644 --- a/include/TTTRRecordTypes.h +++ b/include/TTTRRecordTypes.h @@ -115,5 +115,12 @@ typedef union bh_overflow{ } bh_overflow_t; +// Carl Zeiss Confocor3 raw dat +typedef union cz_confocor3_raw_record{ + uint32_t allbits; + struct { + unsigned mt :32; + } bits; +} cz_confocor3_raw_record_t; #endif //TTTRLIB_TTTRRECORDTYPES_H diff --git a/pyproject.toml b/pyproject.toml index 5fe7ca66..85265e91 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [project] -version = "0.24.0" +version = "0.24.1" name = "tttrlib" requires-python = ">=3.8" description = "Read, write & process time-tagged time-resolved (TTTR) data." 
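The header and record definitions above register the ConfoCor3 raw format as container type 6 (`CZ_CONFOCOR3_CONTAINER`) with record type 10 (`CZ_RECORD_TYPE_CONFOCOR3`). A minimal usage sketch of how the new container can be selected from Python (it assumes the `'CZ-RAW'` name registered in `src/TTTR.cpp` below, that the integer constructor overload is exposed unchanged by the bindings, and a hypothetical file name `'fcs_ch1.raw'`):

```python
import tttrlib

# Both calls select the new ConfoCor3 raw reader ('fcs_ch1.raw' is a hypothetical file name)
data_by_name = tttrlib.TTTR('fcs_ch1.raw', 'CZ-RAW')  # container name registered in TTTR.cpp
data_by_id = tttrlib.TTTR('fcs_ch1.raw', 6)           # CZ_CONFOCOR3_CONTAINER

# The detection channel is stored in the file header, not in the events,
# and is copied to every event by TTTR::read_file (see src/TTTR.cpp below).
print(data_by_name.routing_channels[:5])
print(data_by_name.header.macro_time_resolution)
```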
diff --git a/src/Correlator.cpp b/src/Correlator.cpp index 1c105a7a..5cc209d1 100644 --- a/src/Correlator.cpp +++ b/src/Correlator.cpp @@ -539,6 +539,7 @@ std::pair, std::shared_ptr> Correlator::get_tttr() { } void Correlator::get_x_axis(double** output, int* n_output){ + if(!is_valid) run(); curve.get_x_axis(output, n_output); } diff --git a/src/TTTR.cpp b/src/TTTR.cpp index 0793df4a..82de0d00 100644 --- a/src/TTTR.cpp +++ b/src/TTTR.cpp @@ -21,11 +21,12 @@ TTTR::TTTR() : n_valid_events(0), processRecord(nullptr){ container_names.insert({std::string("PTU"), PQ_PTU_CONTAINER}); - container_names.insert({std::string("HT3"), 1}); - container_names.insert({std::string("SPC-130"), 2}); - container_names.insert({std::string("SPC-600_256"), 3}); - container_names.insert({std::string("SPC-600_4096"), 4}); - container_names.insert({std::string("PHOTON-HDF5"), 5}); + container_names.insert({std::string("HT3"), PQ_HT3_CONTAINER}); + container_names.insert({std::string("SPC-130"), BH_SPC130_CONTAINER}); + container_names.insert({std::string("SPC-600_256"), BH_SPC600_256_CONTAINER}); + container_names.insert({std::string("SPC-600_4096"), BH_SPC600_4096_CONTAINER}); + container_names.insert({std::string("PHOTON-HDF5"), PHOTON_HDF_CONTAINER}); + container_names.insert({std::string("CZ-RAW"), CZ_CONFOCOR3_CONTAINER}); header = new TTTRHeader(tttr_container_type); allocate_memory_for_records(0); } @@ -128,15 +129,15 @@ TTTR::TTTR(const char *fn, int container_type, bool read_input) : TTTR(){ } TTTR::TTTR(const char *fn, int container_type) : TTTR(fn, container_type, true) { -// try { + try { tttr_container_type_str.assign( container_names.right.at(container_type) ); -// } -// catch(...) { -// std::cerr << "Container type " << container_type -// << " not supported." << std::endl; -// } + } + catch(...) { + std::cerr << "Container type " << container_type + << " not supported." 
<< std::endl; + } } TTTR::TTTR(const char *fn, const char *container_type) : TTTR() { @@ -259,9 +260,6 @@ int TTTR::read_file(const char *fn, int container_type) { std::clog << "-- Filename: " << fn << std::endl; std::clog << "-- Container type: " << container_type << std::endl; #endif - // clean up filename - // boost::filesystem::path p = fn; - //filename = boost::filesystem::canonical(boost::filesystem::absolute(p)).generic_string(); fn = filename.c_str(); if (container_type == PHOTON_HDF_CONTAINER) { read_hdf_file(fn); @@ -274,14 +272,29 @@ int TTTR::read_file(const char *fn, int container_type) { std::clog << "-- TTTR record type: " << tttr_record_type << std::endl; #endif processRecord = processRecord_map[tttr_record_type]; - n_records_in_file = get_number_of_records_by_file_size( + n_records_in_file = + get_number_of_records_by_file_size( fp, header->header_end, header->get_bytes_per_record() ); +#ifdef VERBOSE_TTTRLIB + std::clog << "-- TTTR record type: " << tttr_record_type << std::endl; + std::clog << "-- TTTR number of records: " << n_records_in_file << std::endl; +#endif allocate_memory_for_records(n_records_in_file); read_records(); fclose(fp); + } if( container_type == CZ_CONFOCOR3_CONTAINER) { + // Confocor raw data has no channel number in events + auto tag = header->get_tag(header->json_data, "channel"); + int channel = tag["value"]; +#ifdef VERBOSE_TTTRLIB + std::clog << "-- Confocor3 channel: " << channel << std::endl; +#endif + for(int i = 0; i < n_records_in_file; i++) { + routing_channels[i] = channel; + } } #ifdef VERBOSE_TTTRLIB std::clog << "-- Resulting number of TTTR entries: " << n_valid_events << std::endl; @@ -379,6 +392,10 @@ void TTTR::read_records( // read data in chunks to speed up the access size_t number_of_objects; size_t bytes_per_record = header->get_bytes_per_record(); +#ifdef VERBOSE_TTTRLIB + std::cout << "-- Records that will be read : " << n_rec << std::endl; + std::cout << "-- Bytes per record : " << bytes_per_record << std::endl; +#endif do{ auto tmp = (signed char*) malloc(bytes_per_record * (chunk + 1)); number_of_objects = fread(tmp, bytes_per_record, chunk, fp); @@ -389,8 +406,8 @@ void TTTR::read_records( overflow_counter, *(uint64_t *) ¯o_times[n_valid_events], *(uint32_t *) µ_times[n_valid_events], - *(int16_t *) &routing_channels[n_valid_events], - *(int16_t *) &event_types[n_valid_events] + *(int16_t *) &routing_channels[n_valid_events], + *(int16_t *) &event_types[n_valid_events] ); } free(tmp); @@ -424,12 +441,10 @@ void TTTR::set_header(TTTRHeader* v) { std::clog << "-- TTTR::set_header" << std::endl; #endif if(v != nullptr){ - header = v; + header = new TTTRHeader(*v); } } - - void TTTR::get_macro_times(unsigned long long** output, int* n_output){ get_array(n_valid_events, macro_times, output, n_output); } diff --git a/src/TTTRHeader.cpp b/src/TTTRHeader.cpp index 0e27b52e..48635762 100644 --- a/src/TTTRHeader.cpp +++ b/src/TTTRHeader.cpp @@ -71,7 +71,11 @@ TTTRHeader::TTTRHeader( default: tttr_record_type = PQ_RECORD_TYPE_HHT3v2; } - } else if(tttr_container_type == PQ_HT3_CONTAINER){ + } else if(tttr_container_type == CZ_CONFOCOR3_CONTAINER) { + header_end = read_cz_confocor3_header(fpin, json_data); + tttr_record_type = get_tag(json_data, TTTRRecordType)["value"]; + } + else if(tttr_container_type == PQ_HT3_CONTAINER){ header_end = read_ht3_header(fpin, json_data); tttr_record_type = get_tag(json_data, TTTRRecordType)["value"]; } else if(tttr_container_type == BH_SPC600_256_CONTAINER){ @@ -139,6 +143,34 @@ size_t 
TTTRHeader::read_bh132_header( return 4; } + +size_t TTTRHeader::read_cz_confocor3_header( + std::FILE *fpin, + nlohmann::json &data, + bool rewind +) { + if(rewind) std::fseek(fpin, 0, SEEK_SET); + cz_confocor3_settings_t rec; + fread(&rec, sizeof(rec), 1, fpin); + fseek(fpin, 68, SEEK_SET); + float sync_rate; + fread(&sync_rate, sizeof(float), 1, fpin); + double mt_clk = 1. / (1000. * sync_rate); + + add_tag(data, TTTRTagGlobRes, mt_clk, tyFloat8); + // Convert ASCII channel number to int + add_tag(data, "channel", rec.bits.channel - 48, tyInt8); + add_tag(data, TTTRTagBits, 32, tyInt8); + add_tag(data, TTTRRecordType, (int) CZ_RECORD_TYPE_CONFOCOR3, tyInt8); +#ifdef VERBOSE_TTTRLIB + std::clog << "-- Confocor3 header reader " << std::endl; + std::clog << "-- macro_time_resolution: " << mt_clk << std::endl; + std::clog << "-- sync_rate: " << sync_rate << std::endl; +#endif + return 80; +} + + size_t TTTRHeader::read_ht3_header( std::FILE *fpin, nlohmann::json &data, diff --git a/src/TTTRRecordReader.cpp b/src/TTTRRecordReader.cpp index a014f2c4..c2b766b9 100644 --- a/src/TTTRRecordReader.cpp +++ b/src/TTTRRecordReader.cpp @@ -318,5 +318,22 @@ bool ProcessSPC600_256( return false; } +bool ProcessCzRaw( + uint32_t &TTTRRecord, + uint64_t &overflow_counter, + uint64_t &true_nsync, + uint32_t &micro_time, + int16_t &channel, + int16_t &record_type +) { + cz_confocor3_raw_record_t rec; + rec.allbits = TTTRRecord; + true_nsync = rec.bits.mt + overflow_counter; + micro_time = 1; + record_type = 0; + overflow_counter += rec.bits.mt; + + return true; +} diff --git a/tools b/tools index bad454b6..83ab85c3 160000 --- a/tools +++ b/tools @@ -1 +1 @@ -Subproject commit bad454b6a6f4e0ff7adbbdea5dc8924f77fd5196 +Subproject commit 83ab85c301b06555cf27982b5f05e080a7e5742e
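Since a ConfoCor3 record stores only the 32-bit time difference to the previous photon, `ProcessCzRaw` reuses the overflow counter as a running sum: the macro time of each event is the cumulative sum of all preceding deltas. A short sketch of the same bookkeeping in Python (illustrative only; `deltas` stands for the decoded 32-bit records):

```python
def accumulate_cz_records(deltas):
    """Mirror of the ProcessCzRaw bookkeeping: running sum of 32-bit time deltas."""
    overflow_counter = 0
    macro_times = []
    for mt in deltas:                              # mt corresponds to cz_confocor3_raw_record_t.bits.mt
        macro_times.append(mt + overflow_counter)  # true_nsync
        overflow_counter += mt
    return macro_times

print(accumulate_cz_records([10, 5, 7]))  # [10, 15, 22]
```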