# Source archive of libbart-devel at commit c093bab550df41f64e28d7f1bf57d73c96b70f59.

==== libbart-devel/.codespellrc ====

[codespell]
# Ref: https://github.com/codespell-project/codespell#using-a-config-file
skip = .git*,.codespellrc
check-hidden = true
# ignore all acronyms, or mixed case, some names
ignore-regex = \b([A-Z]+|[a-z]+[A-Z][a-z]*|Unser|Hart L|Wil Sweldens)\b|.*d'une.*
# some unfortunate choices of variable names, or even typos which are now
# parts of API
ignore-words-list = te,reconet,reson,nd,scond,inout,coo,dout,parm,pyramide,ist,mot,mapp,regist,noo,mone

==== libbart-devel/.github/workflows/c-cpp.yml ====

name: C/C++ CI

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v2
      - name: update
        run: sudo apt-get install -q gcc make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev gfortran
      - name: make all
        run: make all
      - name: make utest
        run: make utest
      - name: make test
        run: make test
      - name: make pythontest
        run: |
          sudo apt-get install -q python3 python3-numpy
          make pythontest

==== libbart-devel/.github/workflows/codespell.yml ====

# Codespell configuration is within .codespellrc
---
name: Codespell

on:
  push:
    branches: [master]
  pull_request:
    branches: [master]

permissions:
  contents: read

jobs:
  codespell:
    name: Check for spelling errors
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Annotate locations with typos
        uses: codespell-project/codespell-problem-matcher@v1
      - name: Codespell
        uses: codespell-project/actions-codespell@v2

==== libbart-devel/.gitignore.main ====

# autogenerated .gitignore files
.gitignore

# dependency files
*.d

# object files
*.o

# Windows executables
*.exe

# mac debug files
*.dSYM

# python compiled files
*.pyc
python/.ipynb_checkpoints/

# temporary files
*.swp
*~

# Mac file
.DS_Store

# local Makefiles
Makefile.local
Makefiles/Makefile.*

# version string
src/misc/version.inc

# noise simulations
save/nsv/*.dat

# fftw wisdoms
save/fftw/*.fftw

# ctags
tags
GTAGS
GSYMS
GRTAGS
GPATH

# autogenerated documentation
doc/html
doc/latex
doc/dx
doc/commands.txt

# test files
tests/test-*

# clangd cache
.cache/*
compile_commands.json

# vscode
.vscode/*

# wasm:
*.wasm
==== libbart-devel/.gitlab-ci.yml ====

workflow:
  rules:
    # always run on master
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
    # always run for merge requests
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
    # always run if triggered by "run pipeline" in gitlab
    - if: $CI_PIPELINE_SOURCE == "web"
    # always run if the ci variable CI_FORCE is 1,
    # for example by pushing with: git push -o ci.variable=CI_FORCE=1
    - if: $CI_FORCE == "1"
    # Run distro tests if CI_DISTRO_TESTS=1 is set, or when it is a scheduled run
    - if: $CI_PIPELINE_SOURCE == "schedule"
      variables:
        CI_DISTRO_TESTS: "1"
    # do not run for anything else

variables:
  OMP_NUM_THREADS: "1"
  # mpirun in docker has a problem with its default transfer mechanism,
  # so we disable it:
  OMPI_MCA_btl_vader_single_copy_mechanism: "none"
  # make DEBUG_DWARF the default
  DEBUG: "1"
  DEBUG_DWARF: "1"
  # utests give a backtrace + abort on the first error
  BART_UTEST_ABORT: "1"
  # For gitlab-runner using docker: create directories as the USER in the docker image, not as root
  FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: "true"

image: registry.gitlab.tugraz.at/ibi/reproducibility/gitlab-ci-containers/ibi_cuda_bart

default:
  # Make builds interruptible by default
  interruptible: true

stages:
  - distro_build
  - build
  - test1
  - test2

Build_NoDEBUG:
  stage: build
  script:
    - DEBUG=0 DEBUG_DWARF=0 WERROR=1 make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
    expire_in: 45 minutes

Build:
  stage: build
  script:
    - WERROR=1 make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 45 minutes

Build_riscv:
  stage: build
  tags:
    - riscv
  image: registry.gitlab.tugraz.at/ibi/reproducibility/gitlab-ci-containers/ibi_riscv_noroot
  script:
    - make -j 4 all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 180 minutes

Build_Clang:
  stage: build
  script:
    - CC=clang-16 make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 45 minutes

Build_Clang_GPU:
  stage: build
  script:
    - CC=clang-16 CUDA_CC=clang-14 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 45 minutes

Build_Static:
  stage: build
  script:
    # - apt-get update -qq && apt-get install -y libgfortran-12-dev
    - SLINK=1 make
  artifacts:
    paths:
      - bart
    expire_in: 45 minutes

Build_Shared:
  stage: build
  script:
    - make libbart.so
  artifacts:
    paths:
      - libbart.so
    expire_in: 45 minutes

Build_Shared_GPU:
  stage: build
  script:
    - CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make libbart.so
  artifacts:
    paths:
      - libbart.so
    expire_in: 45 minutes

Build_UBSan:
  stage: build
  script:
    - UBSAN=1 ASAN=1 make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 45 minutes

Build_GPU:
  stage: build
  script:
    - CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" WERROR=1 make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 45 minutes

Build_MPI_GPU:
  stage: build
  script:
    - MPI=1 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 45 minutes

Build_MPI:
  stage: build
  script:
    - MPI=1 make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 45 minutes

Build_LTO:
  stage: build
  script:
    - CFLAGS="-flto -O2 -g" DEBUG=0 DEBUG_DWARF=0 make
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 45 minutes

Build_Analyzer:
  stage: build
  image: registry.gitlab.tugraz.at/ibi/reproducibility/gitlab-ci-containers/ibi_gcc-git_bart
  script:
    - gcc-git --version
    - CC=gcc-git CFLAGS="-fanalyzer -Werror" DEBUG=0 DEBUG_DWARF=0 make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
    expire_in: 45 minutes
  allow_failure: true

Build_Tensorflow:
  stage: build
  script:
    # Already part of the Docker image
    # - wget --no-verbose https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.4.0.tar.gz
    # - mkdir tensorflow_dir && tar -C tensorflow_dir -xvzf libtensorflow-gpu-linux-x86_64-2.4.0.tar.gz
    - TENSORFLOW=1 TENSORFLOW_BASE=/tensorflow_dir/ CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make all
  artifacts:
    paths:
      - bart
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
    expire_in: 45 minutes

Build_wasm:
  stage: build
  image: registry.gitlab.tugraz.at/ibi/reproducibility/gitlab-ci-containers/ibi_wasm_bart
  variables:
    DEPS_FOLDER: /wasm_libs
  script:
    - echo -e "FFTW_BASE=$DEPS_FOLDER\n BLAS_BASE=$DEPS_FOLDER\n CC=emcc\n LDFLAGS=-s EXPORTED_FUNCTIONS=_main,__Block_object_dispose -s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=4GB -s INITIAL_MEMORY=256MB -s TOTAL_STACK=64MB $DEPS_FOLDER/lib/libfftw3f.a $DEPS_FOLDER/lib/libopenblas.a $DEPS_FOLDER/usr/local/lib/libBlocksRuntime.a -lnodefs.js -lnoderawfs.js\n OPENBLAS=1\n FORTRAN=0\n FFTWTHREADS=0\n USE_THREAD=1\n PNG=0\n OMP=0\n TENSORFLOW=0\n DEBUG=0\n MKL=0\n ACML=0\n CUDA=0\n CUDNN=0\n ISMRMRD=0\n" > Makefiles/Makefile.wasm
    - bash -c "source /emsdk/emsdk_env.sh && make -j 4 DEBUG=0 DEBUG_DWARF=0 all"
  artifacts:
    paths:
      - bart
      - bart.wasm
      - "./lib/*.a"
      - ./commands/
      - "./src/*.o"
      - ./src/misc/version.inc
      - ./Makefiles/Makefile.wasm
    expire_in: 45 minutes

UTest:
  stage: test1
  script:
    - AUTOCLEAN=0 WERROR=1 make utest
  needs: [Build]
  dependencies:
    - Build

UTest_riscv:
  stage: test1
  tags:
    - riscv
  image: registry.gitlab.tugraz.at/ibi/reproducibility/gitlab-ci-containers/ibi_riscv_noroot
  script:
    - AUTOCLEAN=0 make -j 4 utest
  needs: [Build_riscv]
  dependencies:
    - Build_riscv

UTest_Clang:
  stage: test1
  script:
    - AUTOCLEAN=0 CC=clang-16 make utest
  needs: [Build_Clang]
  dependencies:
    - Build_Clang

UTest_Clang_GPU:
  stage: test1
  script:
    - if ! nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi
    - AUTOCLEAN=0 CC=clang-16 CUDA_CC=clang-14 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make utest_gpu
  needs: [Build_Clang_GPU]
  dependencies:
    - Build_Clang_GPU

UTest_Valgrind:
  stage: test1
  script:
    - AUTOCLEAN=0 UTESTLEAK=1 make utest
  needs: [Build]
  dependencies:
    - Build

UTest_UBSan:
  stage: test1
  script:
    - AUTOCLEAN=0 UBSAN=1 ASAN=1 make utest
  needs: [Build_UBSan]
  dependencies:
    - Build_UBSan

UTest_GPU:
  stage: test1
  script:
    - if ! nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi
    - AUTOCLEAN=0 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" WERROR=1 make utest_gpu
  needs: [Build_GPU]
  dependencies:
    - Build_GPU

UTest_MPI:
  stage: test1
  script:
    - AUTOCLEAN=0 MPI=1 make utest
  needs: [Build_MPI]
  dependencies:
    - Build_MPI

UTest_Tensorflow:
  stage: test1
  script:
    # Already part of the Docker image
    # - wget --no-verbose https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.4.0.tar.gz
    # - mkdir tensorflow_dir && tar -C tensorflow_dir -xvzf libtensorflow-gpu-linux-x86_64-2.4.0.tar.gz
    - AUTOCLEAN=0 TENSORFLOW=1 TENSORFLOW_BASE=/tensorflow_dir/ CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make utest
  needs: [Build_Tensorflow]
  dependencies:
    - Build_Tensorflow

UTest_LTO:
  stage: test1
  script:
    - AUTOCLEAN=0 CFLAGS="-flto -O2 -g" DEBUG=0 DEBUG_DWARF=0 make utest
  needs: [Build_LTO]
  dependencies:
    - Build_LTO

UTest_wasm:
  stage: test1
  image: registry.gitlab.tugraz.at/ibi/reproducibility/gitlab-ci-containers/ibi_wasm_bart
  script:
    - bash -c "source /emsdk/emsdk_env.sh && AUTOCLEAN=0 DEBUG=0 DEBUG_DWARF=0 make utest"
  needs: [Build_wasm]
  dependencies:
    - Build_wasm

IntTest:
  stage: test2
  script:
    - AUTOCLEAN=0 AGUE_REF=/reference_data/ WERROR=1 make test testslow testague
  needs: [Build]
  dependencies:
    - Build

IntTest_riscv:
  stage: test2
  tags:
    - riscv
  image: registry.gitlab.tugraz.at/ibi/reproducibility/gitlab-ci-containers/ibi_riscv_noroot
  script:
    - AUTOCLEAN=0 AGUE_REF=/reference_data/ make -j 4 test testague
  needs: [Build_riscv]
  dependencies:
    - Build_riscv
  timeout: 6 hours

IntTest_Python:
  stage: test2
  script:
    - AUTOCLEAN=0 make pythontest
  needs: [Build]
  dependencies:
    - Build
  allow_failure: true

IntTest_GPU:
  stage: test2
  script:
    - if ! nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi
    - AUTOCLEAN=0 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" WERROR=1 make gputest
  needs: [Build_GPU]
  dependencies:
    - Build_GPU

IntTest_Clang:
  stage: test2
  script:
    - AUTOCLEAN=0 CC=clang-16 make test
  needs: [Build_Clang]
  dependencies:
    - Build_Clang

IntTest_Clang_GPU:
  stage: test2
  script:
    - if ! nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi
    - AUTOCLEAN=0 CC=clang-16 CUDA_CC=clang-14 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" make gputest
  needs: [Build_Clang_GPU]
  dependencies:
    - Build_Clang_GPU

IntTest_UBSan:
  stage: test2
  script:
    - AUTOCLEAN=0 UBSAN=1 ASAN=1 DEBUG=1 UBSAN_OPTIONS=print_stacktrace=1 make test
  needs: [Build_UBSan]
  dependencies:
    - Build_UBSan

IntTest_MPI:
  stage: test2
  script:
    - AUTOCLEAN=0 MPI=1 make testslow
  needs: [Build_MPI]
  dependencies:
    - Build_MPI

IntTest_MPI_GPU:
  stage: test2
  script:
    - if ! nvidia-smi ; then printf "No usable GPU found, skipping GPU tests!\n"; exit 0; fi
    - AUTOCLEAN=0 CUDA=1 CUDA_LIB=lib64 GPUARCH_FLAGS="-arch sm_35 -Wno-deprecated-gpu-targets" MPI=1 make gputest
  needs: [Build_MPI_GPU]
  dependencies:
    - Build_MPI_GPU

#IntTest_LTO:
#  stage: test2
#  script:
#    - CFLAGS="-flto -O2 -g" DEBUG=0 DEBUG_DWARF=0 make test
#  needs: [Build_LTO]
#  dependencies:
#    - Build_LTO

IntTest_wasm:
  stage: test2
  image: registry.gitlab.tugraz.at/ibi/reproducibility/gitlab-ci-containers/ibi_wasm_bart
  script:
    - bash -c "source /emsdk/emsdk_env.sh && AUTOCLEAN=0 DEBUG=0 DEBUG_DWARF=0 make test"
  needs: [Build_wasm]
  dependencies:
    - Build_wasm

Debian_Bookworm:
  variables:
    DEBUG: "0"
    DEBUG_DWARF: "0"
  rules:
    - if: $CI_DISTRO_TESTS == "1"
  stage: distro_build
  image: debian:bookworm
  script:
    - apt-get update
    - apt-get install -y gcc make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev gfortran
    - make all -j8
    - make utest -j8

Debian_Trixie:
  variables:
    DEBUG: "0"
    DEBUG_DWARF: "0"
  rules:
    - if: $CI_DISTRO_TESTS == "1"
  stage: distro_build
  image: debian:trixie
  script:
    - apt-get update
    - apt-get install -y gcc make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev gfortran
    - make all -j8
    - make utest -j8

Fedora_39:
  variables:
    DEBUG: "0"
    DEBUG_DWARF: "0"
  rules:
    - if: $CI_DISTRO_TESTS == "1"
  stage: distro_build
  image: fedora:39
  script:
    - dnf install -y gcc make fftw-devel lapack-devel openblas-devel atlas-devel libpng-devel
    - make all -j8
    - make utest -j8

Fedora_40:
  variables:
    DEBUG: "0"
    DEBUG_DWARF: "0"
  rules:
    - if: $CI_DISTRO_TESTS == "1"
  stage: distro_build
  image: fedora:40
  script:
    - dnf install -y gcc make fftw-devel lapack-devel openblas-devel atlas-devel libpng-devel util-linux-core
    - make all -j8
    - make utest -j8

RockyLinux_8:
  variables:
    DEBUG: "0"
    DEBUG_DWARF: "0"
  rules:
    - if: $CI_DISTRO_TESTS == "1"
  stage: distro_build
  image: docker.io/rockylinux:8
  script:
    - dnf --enablerepo=powertools install -y fftw-devel atlas-devel libpng-devel lapack-devel gcc-toolset-12
    - source scl_source enable gcc-toolset-12  # redhat magic for enabling gcc-12
    - make all -j8
    - make utest -j8

RockyLinux_9:
  variables:
    DEBUG: "0"
    DEBUG_DWARF: "0"
  rules:
    - if: $CI_DISTRO_TESTS == "1"
  stage: distro_build
  image: docker.io/rockylinux:9
  script:
    - dnf --enablerepo=crb install -y fftw-devel atlas-devel libpng-devel lapack-devel gcc-toolset-12
    - source scl_source enable gcc-toolset-12  # redhat magic for enabling gcc-12
    - make all -j8
    - make utest -j8

Ubuntu_22.04:
  variables:
    DEBUG: "0"
    DEBUG_DWARF: "0"
  rules:
    - if: $CI_DISTRO_TESTS == "1"
  stage: distro_build
  image: docker.io/ubuntu:22.04
  script:
    - apt-get update
    - apt-get install -y gcc-12 make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev gfortran
    - echo -e -n "CC=gcc-12\n" > Makefiles/Makefile.local
    - make all -j8
    - make utest -j8

Ubuntu_24.04:
  variables:
    DEBUG: "0"
    DEBUG_DWARF: "0"
  rules:
    - if: $CI_DISTRO_TESTS == "1"
  stage: distro_build
  image: docker.io/ubuntu:24.04
  script:
    - apt-get update
    - apt-get install -y gcc make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev gfortran
    - make all -j8
    - make utest -j8

Arch_Latest:
  variables:
    DEBUG: "0"
    DEBUG_DWARF: "0"
  rules:
    - if: $CI_DISTRO_TESTS == "1"
  stage: distro_build
  image: docker.io/archlinux:base-devel
  script:
    - pacman -Sy --noconfirm blas-openblas fftw libpng
    - echo -e -n "CPPFLAGS+=-I/usr/include/openblas\nAR_LOCK_NEEDED=0\n" > Makefiles/Makefile.local
    - make all -j8
    - make utest -j8
  allow_failure: true
==== libbart-devel/ACKNOWLEDGEMENTS ====

We want to acknowledge the following persons for supporting BART by
contributing source code, testing, feedback, data, bug reports, etc.

(alphabetical)

Marcus T. Alley
Michael Anderson
Jakob Asslaender
Dara Bahri
Yaël Balbastre
Moritz Blumenthal
Pim Borman
Soumick Chatterjee
Joseph Y. Cheng
Nguyen Damien
Sofia Dimoudi
Philipp Ehses
Alexander Fyrdahl
Siddharth Iyer
Hans Johnson
Tamás Hakkel
Martin Heide
Christian Holme
Yuxin Hu
Gregory R. Lee
Evan G. Levine
Gilad Liberman
Max Litster
Tim Loderhose
Michael Lustig
Martin Krämer
Sidharth Kumar
Lyu Mengye
Damien Nguyen
Frank Ong
Bernhard Rapp
Melvin Robinson
Volkert Roeloffs
William A. Romero
Sebastian Rosenzweig
Philip Schaten
Nick Scholand
Jasper Schoormans
Efrat Shimron
David Smith
Martin Strunz
Jonathan I. Tamir
Michelle Tamir (logo)
Zhengguo Tan
Johannes Töger
Christian Tönnes
Aurélien Trotier
Martin Uecker
Shreyas S. Vasanawala
Sana Vaziri
Marten Veldmann
Patrick Virtue
Xiaoqing Wang
Simon Yeung
Tao Zhang

==== libbart-devel/LICENSE ====

Copyright (c) 2013-2018. The Regents of the University of California.
Copyright (c) 2013-2024. BART Developer Team and Contributors.
Copyright (c) 2012. Intel Corporation. (src/lapacke/)
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

==== libbart-devel/Makefile ====

# Copyright 2013-2015. The Regents of the University of California.
# Copyright 2015-2022. Martin Uecker
# Copyright 2022-2024. Institute of Biomedical Imaging, TU Graz.
# All rights reserved. Use of this source code is governed by
# a BSD-style license which can be found in the LICENSE file.

# silent make
#MAKEFLAGS += --silent

# auto clean on makefile updates
AUTOCLEAN?=1

# How it works: older versions of Make will be sorted before 4.0, making the comparison fail
ifneq (4.0,$(firstword $(sort $(MAKE_VERSION) 4.0)))
$(error bart requires version 4.0 of GNU make or newer!)
endif

# clear out all implicit rules
MAKEFLAGS += --no-builtin-rules
# clear out some variables by hand, as we cannot use -R, --no-builtin-variables without recursive make
# but only undefine them if they come from their default values
define undef_builtin
ifeq ($(origin $(1)),default)
undefine $(1)
endif
endef

$(eval $(foreach VAR,CC CXX CPP LD ARFLAGS ,$(eval $(call undef_builtin,$(VAR)))))

# Paths

here = $(realpath $(dir $(lastword $(MAKEFILE_LIST))))
root := $(here)

srcdir = $(root)/src
libdir = $(root)/lib
bindir = $(root)/bin

export LOCKDIR?=${libdir}
export BART_TOOLBOX_PATH=$(root)

MAKEFILES = $(wildcard $(root)/Makefiles/Makefile.*)
ALLMAKEFILES = $(root)/Makefile $(wildcard $(root)/Makefile.* $(root)/*.mk $(root)/rules/*.mk $(root)/Makefiles/Makefile.*)

-include Makefile.$(NNAME)
-include Makefile.local
-include $(MAKEFILES)

# some operations might still be non deterministic
NON_DETERMINISTIC?=0

# allow blas calls within omp regions (fails on Debian 9, openblas)
BLAS_THREADSAFE?=0

# use for ppc64le HPC
MPI?=0
OPENBLAS?=0
MKL?=0
CUDA?=0
CUDNN?=0
ACML?=0
OMP?=1
SLINK?=0
DEBUG?=0
UBSAN?=0
ASAN?=0
FFTWTHREADS?=1
SCALAPACK?=0
ISMRMRD?=0
TENSORFLOW?=0
NOEXEC_STACK?=0
PARALLEL?=0
PARALLEL_NJOBS?=
FORTRAN?=1
PNG?=1
DEBUG_DWARF?=0
WERROR?=0

LOG_BACKEND?=0
LOG_SIEMENS_BACKEND?=0
LOG_ORCHESTRA_BACKEND?=0
LOG_GADGETRON_BACKEND?=0

# The fix that makes AR_LOCK unnecessary is in GNU Make version 4.4.1
# Since MAKE_VERSION only shows the first two version numbers, we need this test
# to be for 4.5 instead of 4.4.
# But we will just comment it out for now, since GNU Make 4.5 does not exist yet,
# and we do not want any surprises if this happens to break anything
#ifeq (4.5,$(firstword $(sort $(MAKE_VERSION) 4.5)))
#AR_LOCK_NEEDED?=0
#else
AR_LOCK_NEEDED?=1
#endif

DESTDIR ?= /
PREFIX ?= usr/local/

BUILDTYPE = Linux
UNAME = $(shell uname -s)
MNAME = $(shell uname -m)
NNAME = $(shell uname -n)

MYLINK=ln

ifeq ($(UNAME),Darwin)
BUILDTYPE = MacOSX
MYLINK = ln -s
endif

ifeq ($(BUILDTYPE), MacOSX)
MACPORTS ?= 1
else
MACPORTS ?= 0
endif

ifeq ($(BUILDTYPE), Linux)
# as the defaults changed on most Linux distributions
# explicitly specify non-deterministic archives to not break make
ARFLAGS ?= rsU
else
ARFLAGS ?= rs
endif

ifeq ($(UNAME),Cygwin)
BUILDTYPE = Cygwin
NOLAPACKE ?= 1
endif

ifeq ($(UNAME),CYGWIN_NT-10.0)
BUILDTYPE = Cygwin
NOLAPACKE ?= 1
endif

ifneq (,$(findstring MSYS,$(UNAME)))
BUILDTYPE = MSYS
#LDFLAGS += -lucrtbase	# support for %F, %T formatting codes in strftime()
#LDFLAGS += -static-libgomp
NOLAPACKE ?= 1
SLINK = 1
endif

ifeq ($(CC),emcc)
BUILDTYPE = WASM
endif

# Automatic dependency generation

DEPFILE = $(*D)/.$(*F).d
DEPFLAG = -MMD -MF $(DEPFILE)
ALLDEPS = $(shell find $(srcdir) utests -name ".*.d")

# Compilation flags

ifeq ($(DEBUG_DWARF),1)
DEBUG=1
endif

ifneq ($(DEBUG),1)
OPT = -O2
else
OPT = -Og
endif
#OPT += -ffp-contract=off

CPPFLAGS ?= -Wall -Wextra
CFLAGS ?= $(OPT) -Wmissing-prototypes -Wincompatible-pointer-types -Wsign-conversion
CXXFLAGS ?= $(OPT)

ifeq ($(BUILDTYPE), MacOSX)
CC ?= gcc-mp-12
else
CC ?= gcc
ifneq ($(BUILDTYPE), MSYS)
# for symbols in backtraces
LDFLAGS += -rdynamic
endif
endif

# for debug backtraces
ifeq ($(DEBUG_DWARF),1)
LIBS += -ldw -lunwind
CPPFLAGS += -DUSE_DWARF
endif

ifeq ($(WERROR),1)
CFLAGS += -Werror
endif

ifeq ($(MNAME),riscv64)
CFLAGS+=-ffp-contract=off
endif

# openblas

ifeq ($(BUILDTYPE), MSYS)
BLAS_BASE ?= /mingw64/include/OpenBLAS/
else
ifneq ($(BUILDTYPE), MacOSX)
BLAS_BASE ?= /usr/
else
ifeq ($(MACPORTS),1)
BLAS_BASE ?= /opt/local/
CPPFLAGS += -DUSE_MACPORTS
endif
BLAS_BASE ?= /usr/local/opt/openblas/
endif
endif

ifeq ($(BUILDTYPE), Linux)
ifneq ($(OPENBLAS), 1)
ifneq (,$(findstring Red Hat,$(shell gcc --version)))
CPPFLAGS+=-I/usr/include/lapacke/
LDFLAGS+=-L/usr/lib64/atlas -ltatlas
endif
endif
endif

# cuda

CUDA_BASE ?= /usr/
CUDA_LIB ?= lib
CUDNN_BASE ?= $(CUDA_BASE)
CUDNN_LIB ?= lib64

# tensorflow

TENSORFLOW_BASE ?= /usr/local/

# acml

ACML_BASE ?= /usr/local/acml/acml4.4.0/gfortran64_mp/

# mkl

MKL_BASE ?= /opt/intel/mkl/lib/intel64/

# fftw

ifneq ($(BUILDTYPE), MacOSX)
FFTW_BASE ?= /usr/
else
FFTW_BASE ?= /opt/local/
endif

# Matlab

MATLAB_BASE ?= /usr/local/matlab/

# ISMRM

ISMRM_BASE ?= /usr/local/ismrmrd/

# Main build targets
#
TBASE=show slice crop resize join transpose squeeze flatten zeros ones flip circshift extract repmat bitmask reshape version delta copy casorati vec poly index multicfl tee trx
TFLP=scale invert conj fmac saxpy sdot spow cpyphs creal carg normalize cdf97 pattern nrmse mip avg cabs zexp calc
TNUM=fft fftmod fftshift noise bench threshold conv rss filter nlmeans mandelbrot wavelet window var std fftrot roistat pol2mask conway morphop
TRECO=pics pocsense sqpics itsense nlinv moba nufft nufftbase rof tgv ictv sake wave lrmatrix estdims estshift estdelay wavepsf wshfl rtnlinv mobafit grog
TCALIB=ecalib ecaltwo caldir walsh cc ccapply rovir calmat svd estvar whiten rmfreq ssa bin psf ncalib
TMRI=homodyne poisson twixread fakeksp looklocker upat fovshift
TSIM=phantom traj signal epg sim raga
TIO=toimg
TNN=reconet nnet onehotenc measure mnist tensorflow nlinvnet
TMOTION=affinereg interpolate estmotion

MODULES = -lnum -lmisc -lnum -lmisc

ifeq ($(BUILDTYPE), MSYS)
MODULES += -lwin
endif

MODULES_pics = -lgrecon -lsense -lmotion -liter -llinops -lwavelet -llowrank -lnoncart -lnn -lnlops
MODULES_sqpics = -lsense -liter -llinops -lwavelet -llowrank -lnoncart -llinops
MODULES_pocsense = -lsense -liter -llinops -lwavelet
MODULES_nlinv = -lnoir -lgrecon -lwavelet -llowrank -lnn -liter -lnlops -llinops -lnoncart
MODULES_ncalib = -lnoir -lgrecon -lwavelet -llowrank -lnn -liter -lnlops -llinops -lnoncart
MODULES_rtnlinv = -lnoir -liter -lnlops -llinops -lnoncart
MODULES_moba = -lmoba -lnoir -lnn -lnlops -llinops -lwavelet -lnoncart -lsimu -lgrecon -llowrank -llinops -liter -lnn
MODULES_mobafit = -lmoba -lnlops -llinops -lsimu -liter -lnoir
MODULES_bpsense = -lsense -lnoncart -liter -llinops -lwavelet
MODULES_itsense = -liter -llinops
MODULES_ecalib = -lcalib -llinops
MODULES_ecaltwo = -lcalib -llinops
MODULES_estdelay = -lcalib
MODULES_caldir = -lcalib
MODULES_walsh = -lcalib
MODULES_calmat = -lcalib
MODULES_cc = -lcalib -llinops
MODULES_ccapply = -lcalib -llinops
MODULES_estvar = -lcalib
MODULES_nufft = -lnoncart -liter -llinops
MODULES_rof = -liter -llinops
MODULES_tgv = -liter -llinops
MODULES_ictv = -liter -llinops
MODULES_bench = -lwavelet -llinops
MODULES_phantom = -lsimu -lgeom
MODULES_bart = -lbox -lgrecon -lsense -lnoir -liter -llinops -lwavelet -llowrank -lnoncart -lcalib -lsimu -lsake -lnlops -lnetworks -lnoir -lnn -liter -lmoba -lgeom -lnn -lmotion -lnlops
MODULES_sake = -lsake
MODULES_traj = -lnoncart
MODULES_raga = -lnoncart
MODULES_wave = -liter -lwavelet -llinops -llowrank
MODULES_threshold = -llowrank -liter -llinops -lwavelet
MODULES_fakeksp = -lsense -llinops
MODULES_lrmatrix = -llowrank -liter -llinops -lnlops
MODULES_estdims =
MODULES_ismrmrd = -lismrm
MODULES_wavelet = -llinops -lwavelet
MODULES_wshfl = -lgrecon -lsense -liter -llinops -lwavelet -llowrank -lnoncart -lnlops -lnn -lnlops
MODULES_ssa = -lcalib
MODULES_bin = -lcalib
MODULES_signal = -lsimu
MODULES_pol2mask = -lgeom
MODULES_epg = -lsimu
MODULES_reconet = -lgrecon -lnetworks -lnoncart -lnn -lnlops -llinops -liter
MODULES_mnist = -lnetworks -lnn -lnlops -llinops -liter
MODULES_nnet = -lgrecon -lnetworks -lnoncart -lnn -lnlops -llinops -liter
MODULES_tensorflow = -lnn -lnlops -llinops -liter
MODULES_measure = -lgrecon -lnetworks -lnoncart -lnn -lnlops -llinops -liter
MODULES_onehotenc = -lnn
MODULES_sim = -lsimu
MODULES_morphop = -lnlops -llinops -lgeom
MODULES_psf = -lnoncart -llinops
MODULES_nlinvnet = -lnetworks -lnoir -liter -lnn -lnlops -llinops -lnoncart -lgrecon -lnetworks -lsense -liter -llinops -lwavelet -llowrank -lnoncart -lnlops -lnn
MODULES_grog = -lcalib
MODULES_affinereg = -lmotion -liter -lnlops -llinops
MODULES_estmotion = -lmotion -lnn -liter -lnlops -llinops
MODULES_interpolate = -lmotion -liter -lnlops -llinops

GCCVERSION12 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 12)
GCCVERSION14 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 14)

# clang

ifeq ($(findstring clang,$(CC)),clang)
CFLAGS += -fblocks
LDFLAGS += -lBlocksRuntime
ifeq ($(DEBUG_DWARF),1)
CFLAGS += -gdwarf -gdwarf-aranges
endif
# Make complains if $(error ...) is indented by tab:
ifeq ($(MPI),1)
$(error ERROR MPI is not supported with clang, please compile with gcc)
endif
else
ifeq ($(findstring emcc,$(CC)),emcc)
CFLAGS += -fblocks
else
# only add if not clang, as it doesn't understand this:
ifeq ($(GCCVERSION14), 1)
CFLAGS += -Wuseless-cast -Wjump-misses-init
else ifeq ($(GCCVERSION12), 1)
CFLAGS += -Wno-vla-parameter -Wno-nonnull -Wno-maybe-uninitialized
else
$(warning ERROR: GCC version 12 or newer is required)
endif
endif
endif

CXX ?= g++
LINKER ?= $(CC)

ifeq ($(ISMRMRD),1)
TMRI += ismrmrd
MODULES_bart += -lismrm
endif

ifeq ($(NOLAPACKE),1)
CPPFLAGS += -DNOLAPACKE
MODULES += -llapacke
endif

ifeq ($(TENSORFLOW),1)
CPPFLAGS += -DTENSORFLOW -I$(TENSORFLOW_BASE)/include
LIBS += -L$(TENSORFLOW_BASE)/lib -Wl,-rpath $(TENSORFLOW_BASE)/lib -ltensorflow_framework -ltensorflow
endif

XTARGETS += $(TBASE) $(TFLP) $(TNUM) $(TIO) $(TRECO) $(TCALIB) $(TMRI) $(TSIM) $(TNN) $(TMOTION)
XTARGETS:=$(sort $(XTARGETS))

# CTARGETS: command targets, that are in the commands/ subdir
CTARGETS = $(addprefix commands/, $(XTARGETS))

ifeq ($(DEBUG),1)
CPPFLAGS += -g
CFLAGS += -g
NVCCFLAGS += -g
endif

ifeq ($(UBSAN),1)
CFLAGS += -fsanitize=undefined,bounds-strict -fno-sanitize-recover=all
ifeq ($(DEBUG),0)
CFLAGS += -fsanitize-undefined-trap-on-error
endif
endif

ifeq ($(ASAN),1)
CFLAGS += -fsanitize=address
endif

ifeq ($(NOEXEC_STACK),1)
CPPFLAGS += -DNOEXEC_STACK
endif

ifeq ($(PARALLEL),1)
MAKEFLAGS += -j$(PARALLEL_NJOBS)
endif

CPPFLAGS += $(DEPFLAG) -iquote $(srcdir)/
CFLAGS += -std=gnu17
CXXFLAGS += -std=c++14

default: bart .gitignore

-include $(ALLDEPS)

# cuda

NVCC?=$(CUDA_BASE)/bin/nvcc

ifeq ($(CUDA),1)
CUDA_H := -I$(CUDA_BASE)/include
CPPFLAGS += -DUSE_CUDA $(CUDA_H)
ifeq ($(CUDNN),1)
CUDNN_H := -I$(CUDNN_BASE)/include
CPPFLAGS += -DUSE_CUDNN $(CUDNN_H)
endif
ifeq ($(BUILDTYPE), MacOSX)
CUDA_L := -L$(CUDA_BASE)/$(CUDA_LIB) -lcufft -lcudart -lcublas -m64 -lstdc++
else
ifeq ($(CUDNN),1)
CUDA_L := -L$(CUDA_BASE)/$(CUDA_LIB) -L$(CUDNN_BASE)/$(CUDNN_LIB) -lcudnn -lcufft -lcudart -lcublas -lstdc++ -Wl,-rpath $(CUDA_BASE)/$(CUDA_LIB)
else
CUDA_L := -L$(CUDA_BASE)/$(CUDA_LIB) -lcufft -lcudart -lcublas -lstdc++ -Wl,-rpath $(CUDA_BASE)/$(CUDA_LIB)
endif
endif
else
CUDA_H :=
CUDA_L :=
endif

# sm_20 no longer supported in CUDA 9
GPUARCH_FLAGS ?=
CUDA_CC ?= $(CC)
NVCCFLAGS += -DUSE_CUDA -Xcompiler -fPIC -O2 $(GPUARCH_FLAGS) -I$(srcdir)/ -m64 -ccbin $(CUDA_CC)
#NVCCFLAGS = -Xcompiler -fPIC -Xcompiler -fopenmp -O2 -I$(srcdir)/

%.o: %.cu
	$(NVCC) $(NVCCFLAGS) -c $^ -o $@
	$(NVCC) $(NVCCFLAGS) -M $^ -o $(DEPFILE)

# OpenMP

ifeq ($(OMP),1)
ifneq ($(BUILDTYPE), MacOSX)
CFLAGS += -fopenmp
CXXFLAGS += -fopenmp
NVCCFLAGS += -Xcompiler -fopenmp
else
ifeq ($(MACPORTS),1)
CFLAGS += -fopenmp
CXXFLAGS += -fopenmp
NVCCFLAGS += -Xcompiler -fopenmp
else
LDFLAGS += "-L/usr/local/opt/libomp/lib" -lomp
CPPFLAGS += "-I/usr/local/opt/libomp/include" -Xclang -fopenmp
endif
endif
else
CFLAGS += -Wno-unknown-pragmas
CXXFLAGS += -Wno-unknown-pragmas
endif

# Message Passing Interface

ifeq ($(MPI),1)
CFLAGS += -DUSE_MPI
CC = mpicc
endif

# BLAS/LAPACK

ifeq ($(SCALAPACK),1)
BLAS_L := -lopenblas -lscalapack
CPPFLAGS += -DUSE_OPENBLAS
CFLAGS += -DUSE_OPENBLAS
else
ifeq ($(ACML),1)
BLAS_H := -I$(ACML_BASE)/include
BLAS_L := -L$(ACML_BASE)/lib -lgfortran -lacml_mp -Wl,-rpath $(ACML_BASE)/lib
CPPFLAGS += -DUSE_ACML
else
ifeq ($(BUILDTYPE), MSYS)
BLAS_H := -I$(BLAS_BASE)
else
BLAS_H := -I$(BLAS_BASE)/include
endif
ifeq ($(BUILDTYPE), MacOSX)
BLAS_L := -L$(BLAS_BASE)/lib -lopenblas
else
ifeq ($(BUILDTYPE), MSYS)
BLAS_L := -L/mingw64/lib -lopenblas
else
ifeq ($(BUILDTYPE), WASM)
BLAS_L := -L$(BLAS_BASE)/lib
else
BLAS_L := -Wl,-rpath $(BLAS_BASE)/lib -L$(BLAS_BASE)/lib
ifeq ($(NOLAPACKE),1)
BLAS_L += -llapack -lblas
CPPFLAGS += -Isrc/lapacke
else
ifeq ($(OPENBLAS), 1)
ifeq ($(FORTRAN), 0)
BLAS_L += -lopenblas
else
BLAS_L += -llapacke -lopenblas
endif
CPPFLAGS += -DUSE_OPENBLAS
CFLAGS += -DUSE_OPENBLAS
else
BLAS_L += -llapacke -lblas
endif
endif
endif
endif
endif
endif
endif

ifeq ($(MKL),1)
BLAS_H := -I$(MKL_BASE)/include
BLAS_L := -L$(MKL_BASE)/lib/intel64 -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core
CPPFLAGS += -DUSE_MKL -DMKL_Complex8="complex float" -DMKL_Complex16="complex double"
CFLAGS += -DUSE_MKL -DMKL_Complex8="complex float" -DMKL_Complex16="complex double"
endif

ifeq ($(BLAS_THREADSAFE),1)
CPPFLAGS += -DBLAS_THREADSAFE
CFLAGS += -DBLAS_THREADSAFE
endif

ifeq ($(NON_DETERMINISTIC),1)
CPPFLAGS += -DNON_DETERMINISTIC
CFLAGS += -DNON_DETERMINISTIC
NVCCFLAGS += -DNON_DETERMINISTIC
endif

CPPFLAGS += $(FFTW_H) $(BLAS_H)

# librt

ifeq ($(BUILDTYPE), MacOSX)
LIBRT :=
else
LIBRT := -lrt
endif

# png

ifeq ($(PNG), 0)
PNG_L :=
CFLAGS += -DNO_PNG
CPPFLAGS += -DNO_PNG
else
PNG_L := -lpng
endif

ifeq ($(SLINK),1)
PNG_L += -lz
ifeq ($(DEBUG_DWARF),1)
LIBS += -lelf -lz -llzma -lbz2
endif
endif

ifeq ($(LINKER),icc)
PNG_L += -lz
endif

# fftw

FFTW_H := -I$(FFTW_BASE)/include/
ifeq ($(BUILDTYPE), WASM)
FFTW_L := -L$(FFTW_BASE)/lib -lfftw3f
else
FFTW_L := -Wl,-rpath $(FFTW_BASE)/lib -L$(FFTW_BASE)/lib -lfftw3f
endif

ifeq ($(FFTWTHREADS),1)
ifneq ($(BUILDTYPE), MSYS)
FFTW_L += -lfftw3f_threads
CPPFLAGS += -DFFTWTHREADS
endif
endif

# Matlab

MATLAB_H := -I$(MATLAB_BASE)/extern/include
MATLAB_L := -Wl,-rpath $(MATLAB_BASE)/bin/glnxa64 -L$(MATLAB_BASE)/bin/glnxa64 -lmat -lmx -lm -lstdc++

# ISMRM

ifeq ($(ISMRMRD),1)
ISMRM_H := -I$(ISMRM_BASE)/include
ISMRM_L := -L$(ISMRM_BASE)/lib -lismrmrd
ISMRM_H += -I /usr/include/hdf5/serial/
else
ISMRM_H :=
ISMRM_L :=
endif

# Logging backends

ifeq ($(LOG_BACKEND),1)
CPPFLAGS += -DUSE_LOG_BACKEND
ifeq ($(LOG_SIEMENS_BACKEND),1)
miscextracxxsrcs += $(srcdir)/misc/UTrace.cc
endif
ifeq ($(LOG_ORCHESTRA_BACKEND),1)
miscextracxxsrcs += $(srcdir)/misc/Orchestra.cc
endif
endif

ifeq ($(ISMRMRD),1)
miscextracxxsrcs += $(srcdir)/ismrm/xml_wrapper.cc
CPPFLAGS += $(ISMRM_H)
LIBS += -lstdc++
endif

# change for static linking

ifeq ($(SLINK),1)
ifeq ($(SCALAPACK),1)
BLAS_L += -lgfortran -lquadmath
else
# work around fortran problems with static linking
LDFLAGS += -static -Wl,--whole-archive -lpthread -Wl,--no-whole-archive -Wl,--allow-multiple-definition
ifneq ($(BUILDTYPE), MSYS)
LIBS += -lmvec
BLAS_L += -llapack -lblas
endif
BLAS_L += -lgfortran -lquadmath
endif
endif

# Modules

.LIBPATTERNS := lib%.a

vpath %.a lib
vpath % commands/

boxextrasrcs := $(XTARGETS:%=src/%.c)

define alib
$(1)srcs := $(wildcard $(srcdir)/$(1)/*.c)
$(1)cudasrcs := $(wildcard $(srcdir)/$(1)/*.cu)
$(1)objs := $$($(1)srcs:.c=.o)
$(1)objs += $$($(1)extrasrcs:.c=.o)
$(1)objs += $$($(1)extracxxsrcs:.cc=.o)

ifeq ($(CUDA),1)
$(1)objs += $$($(1)cudasrcs:.cu=.o)
endif

.INTERMEDIATE: $$($(1)objs)

lib/lib$(1).a: lib$(1).a($$($(1)objs))

endef

ALIBS = misc num grecon sense noir iter linops wavelet lowrank noncart calib simu sake nlops moba lapacke box geom networks nn motion

ifeq ($(ISMRMRD),1)
ALIBS += ismrm
endif

ifeq ($(BUILDTYPE), MSYS)
ALIBS += win
endif

$(eval $(foreach t,$(ALIBS),$(eval $(call alib,$(t)))))

# additional rules for lib misc
$(shell $(root)/rules/update_version.sh)
$(srcdir)/misc/version.o: $(srcdir)/misc/version.inc

# additional rules for lib ismrm
lib/libismrm.a: CPPFLAGS += $(ISMRM_H)

# additional rules for lib box
lib/libbox.a: CPPFLAGS += -DMAIN_LIST="$(XTARGETS:%=%,) ()" -include src/main.h

# lib calib
UTARGETS += test_grog
MODULES_test_grog += -lcalib -lnoncart -lsimu -lgeom

# lib linop
UTARGETS += test_linop_matrix test_linop test_padding
MODULES_test_linop += -llinops
MODULES_test_linop_matrix += -llinops
MODULES_test_padding += -llinops

# lib lowrank
UTARGETS += test_batchsvd
MODULES_test_batchsvd = -llowrank

# lib misc
UTARGETS += test_pattern test_types test_misc test_memcfl test_tree test_streams

# lib moba
UTARGETS += test_moba
MODULES_test_moba += -lmoba -lnoir -llowrank -lwavelet -liter -lnlops -llinops -lsimu

# lib nlop
UTARGETS += test_nlop test_nlop_jacobian
MODULES_test_nlop += -lnlops -lnoncart -llinops -liter
MODULES_test_nlop_jacobian += -lnlops -llinops

# lib noncart
UTARGETS += test_nufft test_fib
MODULES_test_nufft += -lnoncart -llinops
MODULES_test_fib += -lnoncart

# lib num
UTARGETS += test_multind test_flpmath test_splines test_linalg test_polynom test_window test_conv test_ode test_nlmeans test_rand test_matexp
UTARGETS += test_blas test_mdfft test_ops test_ops_p test_flpmath2 test_convcorr test_specfun test_qform test_fft test_gaussians

ifeq ($(MPI),1)
UTARGETS += test_mpi test_mpi_multind test_mpi_flpmath test_mpi_fft
endif

UTARGETS_GPU += test_cudafft test_cuda_flpmath test_cuda_flpmath2 test_cuda_gpukrnls test_cuda_convcorr test_cuda_multind test_cuda_shuffle test_cuda_memcache_clear test_cuda_rand

# lib simu
UTARGETS += test_ode_bloch test_ode_simu test_biot_savart test_signals test_epg test_pulse test_tsegf
MODULES_test_ode_bloch += -lsimu
MODULES_test_ode_simu += -lsimu
MODULES_test_biot_savart += -lsimu
MODULES_test_signals += -lsimu
MODULES_test_epg += -lsimu
MODULES_test_pulse += -lsimu
MODULES_test_tsegf += -lsimu

# lib geom
UTARGETS += test_geom
MODULES_test_geom += -lgeom

# lib iter
UTARGETS += test_iter test_prox test_prox2
MODULES_test_iter += -liter -lnlops -llinops
MODULES_test_prox += -liter -llinops
MODULES_test_prox2 += -liter -llinops -lnlops

# lib nn
ifeq ($(TENSORFLOW),1)
UTARGETS += test_nn_tf
MODULES_test_nn_tf += -lnn -lnlops -llinops
endif

UTARGETS += test_nn_ops test_nn
MODULES_test_nn_ops += -lnn -lnlops -llinops -liter
MODULES_test_nn += -lnn -lnlops -llinops -liter

UTARGETS += test_affine
MODULES_test_affine += -lmotion -lnlops -llinops -liter
UTARGETS_GPU += test_cuda_affine
MODULES_test_cuda_affine += -lmotion -lnlops -llinops -liter

.gitignore: .gitignore.main Makefile*
	@echo '# AUTOGENERATED. DO NOT EDIT. (are you looking for .gitignore.main ?)' > .gitignore
	cat .gitignore.main >> .gitignore
	@echo /bart >> .gitignore
	@echo $(patsubst %, /%, $(CTARGETS) $(UTARGETS) $(UTARGETS_GPU)) | tr ' ' '\n' >> .gitignore

doc/commands.txt: bart
	./rules/update_commands.sh ./bart doc/commands.txt $(XTARGETS)

.PHONY: doxygen
doxygen: makedoc.sh doxyconfig bart
	./makedoc.sh

all: .gitignore $(CTARGETS) bart

# special targets

$(CTARGETS): CPPFLAGS += -DMAIN_LIST="$(XTARGETS:%=%,) ()" -include src/main.h

bart: CPPFLAGS += -DMAIN_LIST="$(XTARGETS:%=%,) ()" -include src/main.h

mat2cfl: $(srcdir)/mat2cfl.c -lnum -lmisc
	$(CC) $(CFLAGS) $(MATLAB_H) -omat2cfl $+ $(MATLAB_L) $(CUDA_L)

# implicit rules

%.o: %.c
	$(CC) $(CPPFLAGS) $(CFLAGS) -c $< -o $@

%.o: %.cc
	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@

# since GNU Make 4.4, archive members can be built separately from
# compilation, which means we do not need the special support for parallel
# building of archive members anymore
# see: https://www.gnu.org/software/make/manual/html_node/Archive-Pitfalls.html
ifeq ($(AR_LOCK_NEEDED),1)
# use for parallel make
AR=./ar_lock.sh
(%): %
	$(AR) $(ARFLAGS) $@ $%
else
# clear default archive member rule:
(%) : % ;
# when building .a files: run AR with all new .o files ($?)
%.a : ; $(AR) $(ARFLAGS) $@ $?
endif

.SECONDEXPANSION:
$(CTARGETS): commands/% : src/main.c $(srcdir)/%.o $$(MODULES_%) $(MODULES)
	$(LINKER) $(LDFLAGS) $(CFLAGS) $(CPPFLAGS) -Dmain_real=main_$(@F) $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(PNG_L) $(ISMRM_L) $(LIBS) -lm $(LIBRT) -o $@
ifeq ($(BUILDTYPE), WASM)
	./rules/add_node_shebang.sh $@
endif

.SECONDEXPANSION:
bart: % : src/main.c $(srcdir)/%.o $$(MODULES_%) $(MODULES)
ifeq ($(SHARED),1)
	$(LINKER) $(LDFLAGS) -shared $(CFLAGS) $(CPPFLAGS) -Dmain_real=main_$@ $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(PNG_L) $(ISMRM_L) $(LIBS) -lm $(LIBRT) -o bart.o
else
	$(LINKER) $(LDFLAGS) $(CFLAGS) $(CPPFLAGS) -Dmain_real=main_$(@F) $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(PNG_L) $(ISMRM_L) $(LIBS) -lm $(LIBRT) -o $@
endif
ifeq ($(BUILDTYPE), WASM)
	./rules/add_node_shebang.sh $@
endif

UTESTS=$(shell $(root)/utests/utests-collect.sh ./utests/$@.c)

.SECONDEXPANSION:
$(UTARGETS): % : utests/utest.c utests/%.o $$(MODULES_%) $(MODULES)
	$(CC) $(LDFLAGS) $(CFLAGS) $(CPPFLAGS) -DUTESTS="$(UTESTS)" $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(LIBS) -lm $(LIBRT) -o $@

UTESTS_GPU=$(shell $(root)/utests/utests_gpu-collect.sh ./utests/$@.c)

.SECONDEXPANSION:
$(UTARGETS_GPU): % : utests/utest.c utests/%.o $$(MODULES_%) $(MODULES)
	$(CC) $(LDFLAGS) $(CFLAGS) $(CPPFLAGS) -DUTESTS="$(UTESTS_GPU)" -DUTEST_GPU $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) $(LIBS) -lm $(LIBRT) -o $@

# linker script version - does not work on MacOS X
#	$(CC) $(LDFLAGS) -Wl,-Tutests/utests.ld $(CFLAGS) -o $@ $+ $(FFTW_L) $(CUDA_L) $(BLAS_L) -lm -rt

# automatic tests

# system tests

ROOTDIR=$(root)
TOOLDIR=$(root)/commands
TESTS_DIR=$(root)/tests
TESTS_TMP=$(TESTS_DIR)/tmp/$$$$
TESTS_OUT=$(TESTS_DIR)/out

include $(root)/tests/*.mk

ifeq ($(BUILDTYPE), MSYS)
TMP_TESTS := $(TESTS)
NOT_SUPPORTED=tests/test-io tests/test-io2 tests/test-join-append tests/test-join-append-one tests/test-whiten
TESTS = $(filter-out $(NOT_SUPPORTED),$(TMP_TESTS))
endif

test: ${TESTS}

testslow: ${TESTS_SLOW}

testague: ${TESTS_AGUE}	# test importing *.dat-files specified in tests/twixread.mk

gputest: ${TESTS_GPU}

pythontest: ${TESTS_PYTHON}

# unit tests

UTEST_RUN=

ifeq ($(MPI),1)
# only cfl files allowed with MPI
UTARGETS:=$(filter-out test_memcfl ,$(UTARGETS))
UTEST_RUN=mpirun -n 3
endif

ifeq ($(UTESTLEAK),1)
UTEST_RUN=valgrind --quiet --leak-check=full --error-exitcode=1 valgrind --log-file=/dev/null
endif

ifeq ($(BUILDTYPE), WASM)
UTEST_RUN=node
endif

.PHONY: utests-all utest utests_gpu-all utest_gpu

utests-all: $(UTARGETS)
	./utests/utests_run.sh "CPU" "$(UTEST_RUN)" $(UTARGETS)

utest: utests-all
	@echo ALL CPU UNIT TESTS PASSED.

utests_gpu-all: $(UTARGETS_GPU)
	./utests/utests_run.sh "GPU" "$(UTEST_RUN)" $(UTARGETS_GPU)

utest_gpu: utests_gpu-all
	@echo ALL GPU UNIT TESTS PASSED.

.PHONY: clean
clean:
	rm -f `find $(srcdir) -name "*.o"`
	rm -f $(root)/utests/*.o
	rm -f $(patsubst %, %, $(UTARGETS))
	rm -f $(patsubst %, %, $(UTARGETS_GPU))
	rm -f $(libdir)/.*.lock

.PHONY: allclean
allclean: clean
	rm -f $(libdir)/*.a $(ALLDEPS)
	rm -f bart
	rm -f $(patsubst commands/%, %, $(CTARGETS))
	rm -f $(CTARGETS)
	rm -f $(srcdir)/misc/version.inc
	rm -rf $(root)/tests/tmp/*/
	rm -rf $(root)/stests/tmp/*/
	rm -rf $(root)/doc/dx
	rm -f $(root)/doc/commands.txt
	rm -f $(root)/save/fftw/*.fftw
	rm -f $(root)/save/nsv/*.dat
	touch isclean

.PHONY: distclean
distclean: allclean

-include isclean

isclean: $(ALLMAKEFILES)
ifeq ($(AUTOCLEAN),1)
	@echo "CONFIGURATION MODIFIED. RUNNING FULL REBUILD."
	touch isclean
	$(MAKE) allclean || rm isclean
else
ifneq ($(MAKECMDGOALS),allclean)
	@echo "CONFIGURATION MODIFIED."
endif
endif

# shared library
.PHONY: shared-lib
shared-lib:
	make allclean
	CFLAGS="-fPIC $(OPT) -Wmissing-prototypes" make
	gcc -shared -fopenmp src/bart.o -Wl,-whole-archive lib/lib*.a -Wl,-no-whole-archive -Wl,-Bdynamic $(FFTW_L) $(CUDA_L) $(BLAS_L) $(PNG_L) $(ISMRM_L) $(LIBS) -lm -lrt -o libbart.so
	make allclean

libbart.so: shared-lib

.PHONY: install
install: bart
	install -d $(DESTDIR)/$(PREFIX)/bin/
	install bart $(DESTDIR)/$(PREFIX)/bin/
	install -d $(DESTDIR)/$(PREFIX)/share/doc/bart/
	install $(root)/doc/*.txt $(root)/README $(DESTDIR)/$(PREFIX)/share/doc/bart/
	install -d $(DESTDIR)/$(PREFIX)/lib/bart/commands/

# generate release tar balls (identical to github)
%.tar.gz:
	git archive --prefix=bart-$(patsubst bart-%.tar.gz,%,$@)/ -o $@ v$(patsubst bart-%.tar.gz,%,$@)

# symbol table
bart.syms: bart
	rules/make_symbol_table.sh bart bart.syms
==== libbart-devel/Makefiles/README.md ====

### Custom Makefiles

Put custom Makefiles here, to be included in the standard Makefile.
The build will automatically include the following files in this
directory matching the expansion `Makefile.*`

Example custom Makefile for modifying the build:

```bash
## Makefile.local
# Makefile for my local build

DEBUG = 1

# Parallel make
PARALLEL ?= 1

# GPU
CUDA=0
CC=clang
OMP=0

# Paths
FFTW_BASE := /opt/local/
MATLAB_BASE := /Applications/MATLAB_R2016a.app
CUDA_BASE = /usr/local/cuda/
BLAS_BASE := /opt/local
```

Example Makefile and library rules for adding a custom program:

```bash
## Makefiles/Makefile.sum
# Compile my custom program, src/sum.c, which relies on
# my custom library, lib/libsum.a

MODULES_sum = -lsum
MODULES_bart += -lsum
XTARGETS += sum
```

```bash
### rules/sum.mk
# Build my custom library with files under src/sum/

sumsrcs := $(wildcard $(srcdir)/sum/*.c)
sumobjs := $(sumsrcs:.c=.o)

.INTERMEDIATE: $(sumobjs)

lib/libsum.a: libsum.a($(sumobjs))
```

==== libbart-devel/README ====

0. License
==========

See LICENSE file for licensing information.

-------------------------------------------------------------------------------

The tools in this software implement various reconstruction algorithms
for Magnetic Resonance Imaging. The software is intended for research
use only and NOT FOR DIAGNOSTIC USE. It comes without any warranty
(see LICENSE for details).

Please cite the corresponding articles when using these tools. Some
references can be found at the end of this file. The source code might
provide more detailed references, e.g. for specific iterative
algorithms.

1. Help
=======

Please direct all questions or comments to the public mailing list:

	mrirecon@lists.eecs.berkeley.edu
	https://lists.eecs.berkeley.edu/sympa/info/mrirecon

Note: This list has a public archive! Please do not send any
confidential information.

Updates and further information can be found here:

	http://mrirecon.github.io/bart/

2. Installation
===============

2.1. Packages
-------------

The easiest way to install BART on Debian or Ubuntu is to install the
BART package:

  $ sudo apt-get install bart bart-view

You can also try the package built with CUDA support:

  $ sudo apt-get install bart-cuda bart-view

Note, if you need Matlab / Python integration, want to use BART on
Mac, Windows, or other Linux distributions, or if you need a very
recent version of BART, you may need to compile BART yourself. Please
follow the instructions below.

The recommended way to use BART on Windows is with the Windows
Subsystem for Linux (WSL2) which is available for Windows 10. Once you
have installed WSL2 you can follow the steps for Linux given above to
install the BART packages, or compile it yourself as described below.

2.2. Prerequisites
------------------

GCC compiler, the FFTW library, and optionally CUDA
(see the Makefile to turn options on or off).
The minimum GCC version supported is 12.
It should also be possible to use the clang compiler.

The software can be used in combination with Matlab or octave.

There is limited support for reading Cartesian data encoded with the
ISMRM Raw Data format when linking with the ISMRMRD library
(http://ismrmrd.sourceforge.net/).

In the following, the symbol '$' indicates a shell prompt. Do not
type '$' when entering commands.

For more build information, check doc/building.txt.

### 2.2.1. Linux

The software tools should run on any recent Linux distribution.

To install the required libraries on Debian and Ubuntu run:

  $ sudo apt-get install gcc make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev gfortran

  (optional)
  $ sudo apt-get install octave

  (optional)
  install version 0.5.2 of the ISMRMRD library

To install the required libraries on Redhat / Centos / RockyLinux / AlmaLinux 8 run:

  $ sudo dnf --enablerepo=powertools install fftw-devel atlas-devel libpng-devel lapack-devel gcc-toolset-12

To activate gcc-12 in the current shell:

  $ source scl_source enable gcc-toolset-12

or to start a new shell with gcc-12 enabled:

  $ scl enable gcc-toolset-12 bash

To install the required libraries on Redhat / Centos / RockyLinux / AlmaLinux 9 run:

  $ sudo dnf --enablerepo=crb install fftw-devel atlas-devel libpng-devel lapack-devel gcc-toolset-12

Note that crb (CodeReady Builder) is the new name of powertools from
RedHat 9, which contains additional development packages.

To install the required libraries on Fedora 39/40 run:

  $ sudo dnf install gcc make fftw-devel lapack-devel openblas-devel atlas-devel libpng-devel

### 2.2.2. Mac OS X

BART is supported on Intel-based and ARM-based Macs. Xcode is also
required. For ARM-based Macs, it is recommended to use gcc12 or
higher.

Using MacPorts (http://www.macports.org/):

  $ sudo port install fftw-3-single
  $ sudo port install gcc12
  $ sudo port install libpng
  $ sudo port install openblas
  $ sudo port install flock
  $ sudo port install gmake

  (optional)
  $ sudo port install octave

  (optional)
  install version 0.5.2 of the ISMRMRD library

Use gmake when building and select gcc as the compiler:

  $ CC=gcc-mp-12 gmake

Using HomeBrew (https://brew.sh):

  $ brew install --cask gcc-arm-embedded
  $ brew install libpng
  $ brew install fftw
  $ brew install openblas
  $ brew install gmake
  $ brew install llvm libomp

Use gmake when building, select gcc as the compiler, and turn off the
default setting for MACPORTS:

  $ CC=gcc MACPORTS=0 gmake

### 2.2.3. Windows

BART is supported through WSL 2 which is available on Windows 10. The
instructions are similar to installing on Linux. Step-by-step
instructions are available on the website at:

	https://mrirecon.github.io/bart/installation_windows.html

First follow the instructions for Debian/Ubuntu in Section 2.2.1 to
install the required libraries. Next follow the instructions in
Section 2.3 to download and compile bart.

2.2.3.1. Using BART with Matlab outside of WSL

To use BART outside of WSL, e.g. with Matlab, it is recommended to
soft link the bart binary to /usr/local/bin. Assuming bart is
installed in the ${BART_TOOLBOX_PATH} directory, execute the
following command in WSL:

  $ sudo ln -s ${BART_TOOLBOX_PATH}/bart /usr/local/bin/bart

Outside of WSL, copy the files from ${BART_TOOLBOX_PATH}/matlab to a
local folder and add the folder to the Matlab path.

2.3. Downloading and Compilation
--------------------------------

If you are a git user, you can simply clone our public repository:

  $ git clone https://github.com/mrirecon/bart

Otherwise, please download the latest version as a zip file from
Github:

	http://github.com/mrirecon/bart/releases/latest

and unpack it somewhere on your computer. Open a terminal window and
enter the bart directory (the top-level directory with the Makefile
in it). To build the reconstruction tools type:

  $ make

If you have installed the ISMRMRD library version 0.5.2, you can also
build the ISMRM raw data import tool:

  $ make ismrmrd

2.4. Getting Started
--------------------

### 2.4.1. Organization

	.		main directory / built software tools
	Makefile	makefile
	Makefiles/	directory for custom makefiles
	matlab/		Matlab helper scripts
	python/		Python helper functions
	doc/		documentation
	pkg/		packaging for Fedora
	rules/		more build-related files
	scripts/	various helper scripts and examples
	src/		source code
	src/calib/	source code for sensitivity calibration
	src/sense/	source code for SENSE or ESPIRiT reconstruction
	src/noir/	source code for nonlinear inversion
	src/sake/	source code for SAKE reconstruction
	src/moba/	source code for model-based reconstruction
	src/networks/	source code for neural networks
	src/wavelet/	source code for wavelets
	src/dfwavelet/	source code for divergence-free wavelets
	src/lowrank/	source code for low-rank regularization
	src/simu/	source code for MRI simulation
	src/noncart/	source code for non-uniform FFT
	src/iter/	library of iterative algorithms
	src/linops/	library of linear operators
	src/nlops/	library of nonlinear operators
	src/nn/		library for neural networks
	src/geom/	library for geometric computations
	src/num/	base library with numerical functions
	src/misc/	miscellaneous (e.g. I/O)
	src/lapacke/	copy of a part of LAPACKE
	src/grecon/	helper functions for generic reconstructions
	src/ismrm/	support for ISMRM raw data format
	src/python/	support for Python
	src/win/	support for Windows
	tests/		system tests
	utests/		unit tests
	lib/		built software libraries

### 2.4.2. Terminal

When using the toolbox commands from a UNIX shell, it is recommended
to set the BART_TOOLBOX_PATH to the base directory and to add it to
the PATH variable. You can do this by running the following command:

  $ . startup.sh

Note: The dot or 'source' command is needed so that the variables are
imported into the current shell.

### 2.4.3. Matlab

You can set the BART_TOOLBOX_PATH to the base directory and add it to
the Matlab path by running the following command in the bart
directory:

  >> startup

(Note: The '>>' indicates the shell prompt. Do not type '>>' when
entering commands.)

You can use Matlab to read and visualize/process files. To write a
data file 'xyz' from Matlab you can run:

  >> writecfl('xyz', A);

Note, that the name 'xyz' is used without filename extension. See
below for more information about the file format used in BART.

To read the data file 'xyz' back into Matlab use:

  >> A = readcfl('xyz');

To call a BART tool (e.g. ecalib) from Matlab, you can use the 'bart'
command:

  >> sensitivities = bart('ecalib', kspace);

Download and unpack the examples which demonstrate interoperability
with Matlab. Go to the examples directory and run:

  >> examples

### 2.4.4. Python

You can set the BART_TOOLBOX_PATH to the base directory and start a
Python interactively as follows:

  $ python3 -i startup.py

To avoid doing the above every time, it is recommended to update your
PYTHONPATH environment. For example, in Linux, assuming your
BART_TOOLBOX_PATH is set, add the below line to your bashrc file.

  $ export PYTHONPATH="${BART_TOOLBOX_PATH}/python:$PYTHONPATH"

After doing so, we can simply import as needed.

  >>> from bart import bart
  >>> import cfl

You can use Python to read and visualize/process files. To write a
data file 'xyz' from Python you can run:

  >>> cfl.writecfl('xyz', A);

Note, that the name 'xyz' is used without filename extension. See
below for more information about the file format used in BART.

To read the data file 'xyz' back into Python use:

  >>> A = cfl.readcfl('xyz');

To call a BART tool (e.g. ecalib) from Python, you can use the 'bart'
command:

  >>> sensitivities = bart(1, 'ecalib', kspace);

The bart function expects the following signature:

  >>> <out1>, ..., <outN> = bart(<nargout>, <command>, <arg1>, ...)

To use BART in a script, please follow the steps in the startup.py
file.

3. Data Format
==============

3.1. Generic
------------

The input and output datasets are each stored in a pair of files: one
header (*.hdr) and one raw data (*.cfl). The header is a simple text
readable file that describes the dimensions of the data. The raw data
file is a binary file containing a single contiguous block of array
data of dimensions described in the header stored in column-major
order (first index is sequential). The raw data file is complex float
(32 bit real + 32 bit imaginary, IEEE 754 binary32 little-endian).

Convenience methods to read and write our data files using Matlab may
be found in the matlab/ directory (readcfl.m and writecfl.m). Similar
methods for Python may be found in the python/ directory (cfl.py).
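The format is simple enough to parse directly from other
environments. The following is a minimal sketch in Python (using
NumPy) of how such a reader and writer can look; it assumes the
header layout written by BART itself (a '# Dimensions' comment line
followed by the list of dimensions). The bundled python/cfl.py and
matlab/readcfl.m / writecfl.m are the complete, supported
implementations.

    import numpy as np

    def readcfl(name):
        # header: plain text; a comment line, then the array dimensions
        with open(name + ".hdr") as f:
            f.readline()                # skip the '# Dimensions' line
            dims = [int(i) for i in f.readline().split()]

        # raw data: complex float32, column-major (first index sequential)
        data = np.fromfile(name + ".cfl", dtype=np.complex64)
        return data.reshape(dims, order="F")

    def writecfl(name, array):
        with open(name + ".hdr", "w") as f:
            f.write("# Dimensions\n")
            f.write(" ".join(str(i) for i in array.shape) + "\n")

        # store in column-major order as complex float32
        array.astype(np.complex64).flatten(order="F").tofile(name + ".cfl")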
3.2. Magnetic Resonance Imaging Data
------------------------------------

For MRI data and images, the dimensions are usually assigned in the
following order:

	0	readout
	1	phase-encoding dimension 1
	2	phase-encoding dimension 2
	3	receive channels
	4	ESPIRiT maps
	...	...

(more dimensions are defined in src/misc/mri.h)

Undersampled data is stored with zeros in the unsampled positions.

3.3. Non-Cartesian Trajectories and Samples
-------------------------------------------

The k-space coordinates for each sample are stored along dimension 0
which must have size equal to three. The unit of measurement is
1/FOV. Dimension 1 stores the samples along a single readout window
while dimension 2 may be used to differentiate between different
lines (e.g. radial spokes). Channel (3) and map (4) dimensions must
not be used (i.e. have size one), while other dimensions can be used
as for Cartesian data. Non-Cartesian samples are stored in a similar
way as trajectories except that dimension 0 is not used. The channel
dimension can be used for different receiver coils as usual.
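To make this layout concrete, here is a small illustrative sketch in
Python that fills an array of the shape described above (the sample
count, spoke count, and uniformly spaced spoke angles are arbitrary
example values, not a prescribed scheme); in practice the 'traj' tool
generates such files:

    import numpy as np

    samples, spokes = 128, 64   # example values

    # dims [3, samples, spokes]: dim 0 holds (kx, ky, kz) in units of 1/FOV
    traj = np.zeros((3, samples, spokes), dtype=np.complex64)

    r = np.arange(samples) - samples / 2.0   # readout from -N/2 to N/2 - 1

    for s in range(spokes):
        phi = np.pi * s / spokes             # uniformly spaced spoke angles
        traj[0, :, s] = r * np.cos(phi)      # kx
        traj[1, :, s] = r * np.sin(phi)      # ky
        # traj[2] (kz) stays zero for a 2D acquisition

    # writecfl("traj", traj)   # store using the helper sketched above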
Sometimes, several versions of BART are installed and the wrong version is used accidentally. 6.2. Reporting Problems ----------------------- Please report problems to our mailing list and include the following information (as applicable): * What system you are using (Linux, Mac OS X, Windows) and whether you use Matlab/Python wrappers. * The output of the 'version' command: $ bart version -V * The exact BART command-line that caused the problem. * The specific error message. * Information about the data files used when the problem occurred (please provide at least the dimensions of all input files). 6.3. Debugging -------------- See 'doc/debugging.txt' for details. 7. References ============= * Tamir JI, Ong F, Cheng JY, Uecker M, Lustig M, Generalized Magnetic Resonance Image Reconstruction using The Berkeley Advanced Reconstruction Toolbox, ISMRM Workshop on Data Sampling and Image Reconstruction, Sedona 2016 * Uecker M, Ong F, Tamir JI, Bahri D, Virtue P, Cheng JY, Zhang T, Lustig M, Berkeley Advanced Reconstruction Toolbox, Annual Meeting ISMRM, Toronto 2015, In: Proc Intl Soc Mag Reson Med 2015; 23:2486 * Uecker M, Virtue P, Ong F, Murphy MJ, Alley MT, Vasanawala SS, Lustig M, Software Toolbox and Programming Library for Compressed Sensing and Parallel Imaging, ISMRM Workshop on Data Sampling and Image Reconstruction, Sedona 2013 References related to implemented methods and algorithms can be found in the file 'doc/references.txt'. libbart-devel/README.md000066400000000000000000000023231472525725500150670ustar00rootroot00000000000000 BART: Toolbox for Computational Magnetic Resonance Imaging ========================================================== [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.592960.svg)](https://doi.org/10.5281/zenodo.592960) The Berkeley Advanced Reconstruction Toolbox (BART) is a free and open-source image-reconstruction framework for Computational Magnetic Resonance Imaging. The tools in this software implement various reconstruction algorithms for Magnetic Resonance Imaging. The software is intended for research use only and NOT FOR DIAGNOSTIC USE. It comes without any warranty (see LICENSE for details). For more information: * Basics: [README](./README) * Website: https://mrirecon.github.io/bart/ Information for Contributors ---------------------------- Thank you for helping to improve BART! In order for us to be able to accept your contribution, it has to be released under the BSD license used by BART (see LICENSE file). By submitting patches to us it is understood that you agree to these terms and that you confirm that you hold all necessary rights yourself or have permission from the copyright holder. Please also add the name of the copyright holder to the copyright header in all new or changed files. libbart-devel/ar_lock.sh000077500000000000000000000006371472525725500155660ustar00rootroot00000000000000#!/bin/bash set -e if command -v flock > /dev/null ; then flock ${LOCKDIR}/.`basename $2`.lock -c "ar $*" exit 0 fi if command -v shlock > /dev/null ; then LOCK=/tmp/`basename $2`.lock trap 'rm -f ${LOCK} ; exit 1' 1 2 3 15 while true ; do if shlock -p $$ -f ${LOCK} ; then ar $* rm -rf ${LOCK} exit 0 else sleep 1 fi done fi echo "Error: no flock/shlock command!"
exit 1 libbart-devel/build_webasm.sh000077500000000000000000000060661472525725500166140ustar00rootroot00000000000000#!/bin/bash emcc -O3 -Wall bart.o -s EXPORTED_FUNCTIONS="['__Block_object_dispose','_malloc','_free','_bart_version', '_calc_phantom', '_calc_bart', \ '_calc_circ', '_fftc','_ifftc','_num_init', '_pha_opts_defaults', '_memcfl_create', '_load_cfl', '_main_ecalib', '_main_pics', '_main_phantom', \ '_main_fft']" -s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=4GB -o ./web/wwwroot/bart.js \ $HOME/wasm_libs/lib/libfftw3f.a $HOME/wasm_libs/lib/libopenblas.a $HOME/wasm_libs/usr/local/lib/libBlocksRuntime.a emcc -O3 -Wall bart.o -s EXPORTED_FUNCTIONS="['__Block_object_dispose','_malloc','_free','_bart_version', \ '_memcfl_create', '_load_cfl', '_memcfl_list_all', '_memcfl_unlink', \ '_main_avg', '_main_bench', '_main_bin', '_main_bitmask', '_main_cabs', '_main_caldir', '_main_calmat', '_main_carg', '_main_casorati', \ '_main_cc', '_main_ccapply', '_main_cdf97', '_main_circshift', '_main_conj', '_main_conv', '_main_conway', '_main_copy', '_main_cpyphs', \ '_main_creal', '_main_crop', '_main_delta', '_main_ecalib', '_main_ecaltwo', '_main_estdelay', '_main_estdims', '_main_estshift', \ '_main_estvar', '_main_extract', '_main_fakeksp', '_main_fft', '_main_fftmod', '_main_fftrot', '_main_fftshift', '_main_filter', \ '_main_flatten', '_main_flip', '_main_fmac', '_main_fovshift', '_main_homodyne', '_main_ictv', '_main_index', '_main_invert', \ '_main_itsense', '_main_join', '_main_looklocker', '_main_lrmatrix', '_main_mandelbrot', '_main_measure', '_main_mip', \ '_main_mnist', '_main_moba', '_main_mobafit', '_main_morphop', '_main_multicfl', '_main_nlinv', '_main_nnet', '_main_noise', '_main_normalize', \ '_main_nrmse', '_main_nufft', '_main_nufftbase', '_main_onehotenc', '_main_ones', '_main_pattern', '_main_phantom', '_main_pics', \ '_main_pocsense', '_main_poisson', '_main_pol2mask', '_main_poly', '_main_reconet', '_main_repmat', '_main_reshape', '_main_resize', \ '_main_rmfreq', '_main_rof', '_main_roistat', '_main_rss', '_main_rtnlinv', '_main_sake', '_main_saxpy', '_main_scale', '_main_sdot', '_main_show', \ '_main_signal', '_main_sim', '_main_slice', '_main_spow', '_main_sqpics', '_main_squeeze', '_main_ssa', '_main_std', '_main_svd', '_main_tgv', \ '_main_threshold', '_main_toimg', '_main_traj', '_main_transpose', '_main_twixread', '_main_upat', '_main_var', '_main_vec', '_main_version', \ '_main_walsh', '_main_wave', '_main_wavelet', '_main_wavepsf', '_main_whiten', '_main_window', '_main_wshfl', '_main_zeros', '_main_zexp' \ ]" -s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=4GB -o ./web/wwwroot/bart_cmd.js \ $HOME/wasm_libs/lib/libfftw3f.a $HOME/wasm_libs/lib/libopenblas.a $HOME/wasm_libs/usr/local/lib/libBlocksRuntime.a # modularized: emcc -Wall \ -s EXPORTED_FUNCTIONS=_main,_malloc,_free,_mmap,_munmap,_setenv,_getenv,__Block_object_dispose \ -s EXPORTED_RUNTIME_METHODS=ccall,cwrap,FS \ -s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=4GB -s INITIAL_MEMORY=256MB -s TOTAL_STACK=64MB -sMODULARIZE=1 -sEXPORT_NAME=bart_main \ -O3 \ -o ./web/wwwroot/bart_main.js \ bart.o $HOME/wasm_libs/lib/libfftw3f.a $HOME/wasm_libs/lib/libopenblas.a $HOME/wasm_libs/usr/local/lib/libBlocksRuntime.a libbart-devel/commands/000077500000000000000000000000001472525725500154115ustar00rootroot00000000000000libbart-devel/commands/.gitignore000066400000000000000000000001261472525725500174000ustar00rootroot00000000000000# Ignore everything in this directory * # Except this file !.gitignore # and sub !sub 
libbart-devel/commands/sub000077500000000000000000000006361472525725500161330ustar00rootroot00000000000000#!/bin/bash set -e if [ ! -f ${BART_TOOLBOX_PATH}/bart ] ; then if [ -f ${TOOLBOX_PATH}/bart ] ; then BART_TOOLBOX_PATH=${TOOLBOX_PATH} else echo "\$BART_TOOLBOX_PATH is not set correctly!" exit 1 fi fi if [ $# -lt 3 ] ; then echo "Usage: $0 <input1> <input2> <output>" >&2 echo "Subtract <input2> from <input1> and save in <output>" >&2 exit 1 fi ${BART_TOOLBOX_PATH}/bart saxpy -- -1. $1 $2 $3 libbart-devel/doc/000077500000000000000000000000001472525725500143555ustar00rootroot00000000000000libbart-devel/doc/applications.txt000066400000000000000000000140031472525725500176020ustar00rootroot00000000000000 (an incomplete list of papers using BART...) Hollingsworth KG, Higgins DM, McCallum M, Ward L, Coombs A, Straub V. Investigating the quantitative fidelity of prospectively undersampled chemical shift imaging in muscular dystrophy with compressed sensing and parallel imaging reconstruction. Magn Reson Med 2014; 72:1610-1619. Zhang T, Cheng JY, Potnick AG, Barth RA, Alley MT, Uecker M, Lustig M, Pauly JM, Vasanawala SS. Fast Pediatric 3D Free Breathing Abdominal Dynamic Contrast Enhanced MRI with a High Spatiotemporal Resolution, J Magn Reson Imaging 2015; 41:460-473. Addy NO, Ingle RR, Wu HH, Hu BS, Nishimura DG. High-resolution variable-density 3D cones coronary MRA. Magn Reson Med 2015; 74:614-621. Cheng JY, Zhang T, Ruangwattanapaisarn N, Alley MT, Uecker M, Pauly JM, Lustig M, Vasanawala SS. Free-Breathing Pediatric MRI with Nonrigid Motion Correction and Acceleration, J Magn Reson Imaging 2015; 42:407-420. Athalye V, Lustig M, Uecker M. Parallel Magnetic Resonance Imaging as Approximation in a Reproducing Kernel Hilbert Space, Inverse Problems 2015; 31:045008. Mann LW, Higgins DM, Peters CN, Cassidy S, Hodson KK, Coombs A, Taylor R, Hollingsworth KG. Accelerating MR Imaging Liver Steatosis Measurement Using Combined Compressed Sensing and Parallel Imaging: A Quantitative Evaluation, Radiology 2016; 278:245-256. Cheng JY, Hanneman K, Zhang T, Alley MT, Lai P, Tamir JI, Uecker M, Pauly JM, Lustig M, Vasanawala SS. Comprehensive Motion-Compensated Highly-Accelerated 4D Flow MRI with Ferumoxytol Enhancement for Pediatric Congenital Heart Disease. J Magn Reson Imaging 2016; 43:1355-1368. Tamir JI, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M. T2-Shuffling: Sharp, Multi-Contrast, Volumetric Fast Spin-Echo Imaging. Magn Reson Med 2017; 77:180-195. Uecker M, Lustig M. Estimating Absolute-Phase Maps Using ESPIRiT and Virtual Conjugate Coils. Magn Reson Med 2017; 77:1201-1207. Cheng JY, Zhang T, Alley MT, Uecker M, Lustig M, Pauly JM, Vasanawala SS. Comprehensive Multi-Dimensional MRI for the Simultaneous Assessment of Cardiopulmonary Anatomy and Physiology. Scientific Reports 2017; 7:5330. Bao S, Tamir JI, Young JL, Tariq U, Uecker M, Lai P, Chen W, Lustig M, Vasanawala SS. Fast comprehensive single-sequence four-dimensional pediatric knee MRI with T2 shuffling. J Magn Reson Imaging 2017; 45:1700-1711. Mazzoli V, Schoormans J, Froeling M, Sprengers AM, Coolen BF, Verdonschot N, Strijkers GJ, Nederveen AJ. Accelerated 4D self-gated MRI of tibiofemoral kinematics. NMR in Biomed 2017; 30:e3791. Moghari MH, Uecker M, Roujol S, Sabbagh M, Geva T, Powell AJ. Accelerated Whole-heart Magnetic Resonance Angiography Using a Variable-Density Poisson-Disc Undersampling Pattern and Compressed Sensing Reconstruction. Magn Reson Med 2018; 79:761-769.
Peper ES, Strijkers GJ, Gazzola K, Potters WV, Motaal AG, Luirink IK, Hutten BA, Wiegman A, van Ooij P, van den Born B-JH, Nederveen AJ, Coolen BF. Regional assessment of carotid artery pulse wave velocity using compressed sensing accelerated high temporal resolution 2D CINE PC MRI. J Cardiovasc Magn Reson 2018; 20:1-12. Mazzoli V, Gottwald LM, Peper ES, Froeling M, Coolen BF, Verdonschot N, Sprengers AM, van Ooij P, Strijkers GJ, Nederveen AJ. Accelerated 4D phase contrast MRI in skeletal muscle contraction. Magn Reson Med 2018; 80:1799-1811. Rosenzweig S, Holme HCM, Wilke RN, Voit D, Frahm J, Uecker M. Simultaneous Multi-Slice Reconstruction Using Regularized Nonlinear Inversion: SMS-NLINV. Magn Reson Med 2018; 79:2057-2066. Lyu M, Barth M, Xie VB, Liu Y, Ma X, Feng Y, Wu EX. Robust SENSE reconstruction of simultaneous multislice EPI with low-rank enhanced coil sensitivity calibration and slice-dependent 2D Nyquist ghost correction. Magn Reson Med 2018; 80:1376-1390. Sanders J, Song H, Frank S, Bathala T, Venkatesan A, Anscher M, Tang C, Bruno T, Wei W, Ma J. Parallel imaging compressed sensing for accelerated imaging and improved SNR in MRI-based prostate brachytherapy post-implant dosimetry. Brachytherapy 2018; 17:816-824. Chen F, Taviani V, Tamir JI, Cheng JY, Zhang T, Song Q, Hargreaves BA, Pauly JM, Vasanawala SS. Self-Calibrating Wave-Encoded Variable-Density Single-Shot Fast Spin Echo Imaging. J Magn Reson Imaging 2018; 47:954-966. Roeloffs V, Rosenzweig S, Holme HCM, Uecker M, Frahm J. Frequency-modulated SSFP with radial sampling and subspace reconstruction: A time-efficient alternative to phase-cycled bSSFP. Magn Reson Med 2019; 81:1566-1579. de Jonge CS, Coolen BF, Peper ES, Motaal AG, Nio CY, Somers I, Strijkers GJ, Stoker J, Nederveen AJ. Evaluation of compressed sensing MRI for accelerated bowel motility imaging. European Radiology Experimental 2019; 3:1-12. Walheim J, Dillinger H, Kozerke S. Multipoint 5D flow cardiovascular magnetic resonance - accelerated cardiac- and respiratory-motion resolved mapping of mean and turbulent velocities. J Cardiovasc Magn Reson 2019; 21:42. Wang X, Kohler F, Unterberg-Buchwald C, Lotz J, Frahm J, Uecker M. Model-based myocardial T1 mapping with sparsity constraints using single-shot inversion-recovery radial FLASH. J Cardiovasc Magn Reson 2019; in press. Hauptmann A, Arridge S, Lucka F, Muthurangu V, Steeden JA. Real-time cardiovascular MR with spatio-temporal artifact suppression using deep learning - proof of concept in congenital heart disease. Magn Reson Med 2019; 81:1143-1156. Tamir JI, Taviani V, Alley MT, Perkins BC, Hart L, O'Brien K, Wishah F, Sandberg JK, Anderson MJ, Turek JS, Willke TL, Lustig M, Vasanawala SS. Targeted rapid knee MRI exam using T2 shuffling. J Magn Reson Imaging 2019; 49:e195-e204. doi:10.1002/jmri.26600 Su Y, Anderson M, Tamir JI, Lustig M, Li K. Compressed Sensing MRI Reconstruction on Intel HARPv2. 2019 IEEE 27th Annual International Symposium on Field-Programmable Custom Computing Machines (FCCM), San Diego, CA, USA, 2019, pp. 254-257. doi: 10.1109/FCCM.2019.00041 Smith DS, Sengupta S, Smith SA, Welch EB. Trajectory optimized NUFFT: Faster non-Cartesian MRI reconstruction through prior knowledge and parallel architectures. Magn Reson Med 2019; 81:2064-2071. libbart-devel/doc/bart.1000066400000000000000000000004661472525725500153730ustar00rootroot00000000000000.TH BART 1 .SH NAME bart - Berkeley Advanced Reconstruction Toolbox .SH SYNOPSIS .B bart .IR command [\fB\-h\fR] ...
.SH DESCRIPTION .B bart invokes a command from the Berkeley Advanced Reconstruction Toolbox. .SH AUTHOR BART Developer Team and Contributors. .SH SEE ALSO .B https://mrirecon.github.io/bart/ libbart-devel/doc/bitmasks.txt000066400000000000000000000012771472525725500167400ustar00rootroot00000000000000 A bitmask is a binary number where the individual bits are used to indicate something. For example, a bitmask is often used to select a subset of dimensions, e.g. if the FFT should be applied to dimensions 3 and 5 the corresponding bits at positions 3 and 5 are set in a bitmask: 876543210 (position of the bit) 000101000b (bitmask as binary number) In decimal this binary number is 40, so the command $ bart fft 40 input output would transform the dimensions 3 and 5 (counting from zero). The 'bitmask' command computes the required bitmask from the set of dimensions: $ bart bitmask 3 5 40 On the command-line both commands can also be combined: $ bart fft $(bart bitmask 3 5) input output libbart-devel/doc/building.txt000066400000000000000000000115331472525725500167160ustar00rootroot00000000000000 0. Introduction BART has a build system based on GNU Make. The build system offers many features for users and developers: BART can be built on different architectures, with different compilers, and with various optional features. This makes it easy to use BART in different environments, on a laptop, a multi-GPU system, an HPC cluster, or in the cloud. The build system also supports running system and unit tests. To make developing more fun, the makefile is optimized for extremely fast builds. Using parallel builds, BART can be built from scratch in about five seconds. After changing a single source code file it is usually possible to rebuild the binary in less than a second. This is accomplished by automatically maintaining dependencies between object files and incrementally updating the binaries from objects stored in libraries. 1. Building BART 1.2. Main Build Targets 1.2.1. Default By default, the main 'bart' binary will be built with: make or make bart 1.2.2. Building Individual Commands Individual BART commands can be built as standalone binaries: make <command> All BART commands can be built with: make all Attention: If the BART_TOOLBOX_PATH is set, the 'bart' tool will call the standalone tool and not the built-in tool. This can be used to selectively update individual tools, but can also cause confusion. 1.2.3. Testing System and unit tests can be built and run with: make test make utest To run individual tests: make tests/test-pics-cs 1.2.4. Cleaning Up To clean up the working directory, run: make clean To also remove all built commands, run: make allclean 1.3. Libraries As a side effect of building the main 'bart' tool, static libraries are generated in 'lib/'. 2. Local Configuration The build can be configured by setting or adding variables. 2.1. Makefile.local It is recommended to put these variables into a file called 'Makefile.local' in the main BART directory. This file is then automatically included. By having the local configuration in a separate file, local changes are not overwritten when BART is updated and do not cause conflicts when using a version control system. 2.2. Makefile.<machine> It is also possible to put machine-specific configuration variables in a Makefile.<machine>, where <machine> is the name of the machine as returned by 'uname -n'.
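As a concrete illustration (a minimal sketch only; the individual options are described in section 3 below, and CUDA_BASE in particular depends on the local installation), such a Makefile.local could look like this:

	# Makefile.local (example)
	PARALLEL=1
	CUDA=1
	CUDA_BASE=/usr/local/cuda
	CUDA_LIB=lib64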
2.3. Custom Makefiles directory Additional Makefiles can be included by placing them in the Makefiles directory. All files matching the expansion Makefiles/Makefile.* are automatically included in the build. See Makefiles/README.md for example files. 3. Build Options 3.1. Adding New BART Commands # add new tool (src/foo.c) to list of targets XTARGETS += foo # dependencies for foo MODULES_foo += -llowrank 3.2. Build Flags 3.2.1. Silent Builds Silent builds can be activated with the following option: MAKEFLAGS += --silent 3.2.2. Parallel Build Parallel builds can be activated with the following option: PARALLEL=1 3.3. Optional Features Some BART features are optional, because they depend on other libraries or features which are not available everywhere. 3.3.1. CUDA Support for CUDA can be turned on. It may be necessary to also provide the base path of the CUDA installation. CUDA is supported starting with version 8, however, newer versions are recommended. CUDA=1 CUDA_BASE=/usr/ 3.3.2. OpenMP OpenMP can be turned off for compilers which do not support it properly (e.g. clang): OMP=0 3.3.3. FFTW Threads It is possible to turn off FFTW threads if the library is not available: FFTWTHREADS=0 3.3.4. ISMRM Raw Data Format If the ISMRMRD library is installed, preliminary support for the ISMRM raw data format can be activated: ISMRMRD=1 3.3.5. TensorFlow Integration with TensorFlow is possible using the C API of TensorFlow, which can be downloaded from here: https://www.tensorflow.org/install/lang_c See tensorflow.txt for more information on conventions used. TensorFlow support can then be activated with: TENSORFLOW=1 TENSORFLOW_BASE=/opt/tensorflow/ 3.3.6. MPI Support for MPI can be turned on. It is recommended to use Open MPI as the MPI implementation, as this enables automatic detection of CUDA-aware MPI. Moreover, BART reads environment variables exported by the Open MPI implementation of mpirun to automatically activate run-time support for MPI. MPI=1 3.4. Compiler 3.4.1. Different Compiler If different compilers or compiler versions are installed, it is possible to override the default compiler: CC = gcc-4.8 #CC = gcc-5 #CC = clang-3.5 3.4.2. Different CFLAGS Different CFLAGS can be set like this: CFLAGS= -g -O2 -ffast-math 3.4.3. Static Linking Static linking can be used to build binaries which do not depend on external shared libraries. This might be useful if BART is to be deployed on a different machine where it is difficult to install the required dependencies. SLINK=1 libbart-devel/doc/debugging.txt000066400000000000000000000015201472525725500170520ustar00rootroot00000000000000 Running a command in a debugger ------------------------------- This involves several (easy) steps: 1. Recompile BART with debugging information. Create a Makefile.local in the BART directory with the following line added: DEBUG=1 Then recompile with: make allclean make bart 2. Install the GNU debugger (gdb). 3. Run the failing BART command: gdb --args bart <command> [<arguments> ...] 4. Then type 'run' to start the process. If it crashes, you are back in the debugger. You can also type CTRL-C to interrupt it at any time. In the debugger: You can type 'bt' to get a backtrace which is helpful to investigate a segmentation fault or similar. You can also call functions. For example, this can be used to save a multi-dimensional array from the debugger like this: (gdb) call dump_cfl("dbg_img", 16, dims, image) libbart-devel/doc/dimensions-and-strides.txt000066400000000000000000000037031472525725500215040ustar00rootroot00000000000000 BART is built around a library which defines many generic functions on multi-dimensional arrays.
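As a small illustration (a minimal sketch; md_alloc, md_clear, md_copy, md_free and CFL_SIZE are the actual API from src/num/multind.h, but the dimensions chosen here are made up):

	#include <complex.h>

	#include "num/multind.h"

	void example(void)
	{
		enum { N = 3 };
		long dims[N] = { 128, 128, 8 };

		// allocate two 3D arrays of complex floats
		complex float* src = md_alloc(N, dims, CFL_SIZE);
		complex float* dst = md_alloc(N, dims, CFL_SIZE);

		md_clear(N, dims, src, CFL_SIZE);	// set all elements to zero
		md_copy(N, dims, dst, src, CFL_SIZE);	// copy assuming default strides

		md_free(src);
		md_free(dst);
	}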
Most functions come in two flavours: A basic version (e.g. 'md_copy') which takes as input the dimensions for its arguments and an extended version (e.g. 'md_copy2') which also takes the strides for each argument. The basic versions assume strides for a column-major array which is contiguous in memory. A stride refers to the distance in memory between successive elements in an array. They are used to compute the position of an element in memory for a given multi-index by multiplying the index of each dimension with the corresponding stride and summing the results. For a regular array of complex floats contiguously laid out in memory with dimensions 'dim[N]' the default strides are: str[0] = sizeof(complex float) str[1] = dim[0] * sizeof(complex float) str[2] = dim[0] * dim[1] * sizeof(complex float) ... There is one exception: For a dimension with length one, the corresponding stride is set to zero. These default strides are what the function 'md_calc_strides' computes and which are also used automatically whenever strides are not explicitly specified. Dimensions and strides can be manipulated to obtain different views of the same array without having to make a copy. For example, swapping dimensions and strides for two indices yields a transposed view of the array, an increased stride yields a view of a sub-array, a negative stride a view where the corresponding dimension is reversed (flipped), and a zero stride yields a view in which one dimension is replicated. Many basic functions such as 'md_transpose', 'md_flip', 'md_slice', 'md_copy_block' etc. are implemented in this way by setting up dimensions and strides and calling into the generic copy function 'md_copy2'. Even much more complicated operations can often be implemented in just a few lines of code. One example is building a Casorati matrix of overlapping blocks (see 'num/casorati.c'). libbart-devel/doc/distributed-architectures.txt000066400000000000000000000106771472525725500223060ustar00rootroot00000000000000Distributed architectures means using different compute entities. Nodes can be PCs, workstations, or nodes of a cluster. They can be connected by TCP/IP or other connections such as InfiniBand. An alternative term would be heterogeneous systems. BART uses the Message Passing Interface (MPI) [1] to work on distributed systems. # Basic Requirements 1.) Install (Open-)MPI on all nodes 2.) make sure the bart executable is working on all nodes (compiling on each node may be necessary, especially if different OSes are used) 3.) build bart with MPI=1 4.) setup ssh connections between nodes (requires an ssh-server on each node as well) 5.) setup a file share between the nodes (for file reading/writing) 6.) run bart (with mpirun) and parallel flags (bart -p) # BART command line interface (no mpi required) - bart -p [-s dim0 ... dimN] [-e maxdim0 ... maxdimN] [-S] tool
- -l: specifies dimensions of looping which will be parallelized - -s: gives start dimensions or (if -e not given) slice which should be processed - -e: gives max in dimensions (imagine a for loop with i < maxdim) - Example 1: loop in dimension 13 from item 1 to 3 - bart -l $(bart bitmask 13) -s 1 -e 4 fft 3 example_file.ra - Example 2: process only slice 2 in a 3D stack - bart -l $(bart bitmask 3) -s 2 fft 1 example_file.ra - Example 3: loop in dimension 13 over all items (3 items required in this dimension for this example) - bart -l $(bart bitmask 13) -e 3 example_file.ra - Example 4: loop in dimensions 12 and 13 over items 2 to 3 and 3 to 5 - bart -l $(bart bitmask 12 13) -s 2:3 -e 3:5 fft 1 example_file.ra # Useful commands - [Setup ssh key]: ssh-keygen -t rsa -b 2024 -f ~/.ssh/mpi - generate rsa key with bitlength 2024 in file ~/.ssh/mpi - [Copy ssh key]: ssh-copy-id <user>@<node> - copies the public key from <node1> to <node2>, 'ssh-copy-id' has to be installed separately - Advantage: you cannot accidentally share your private key - [Setup file share over ssh]: sshfs <user>@<node>:<dir> <mountpoint> - mounts <dir> on <mountpoint>, <user> is the user on <node>. Note: if ssh is set up correctly, you don't need the same users on each node - [Unmount file share over ssh]: fusermount -u <mountpoint> - [Run bart with MPI]: 'mpirun -n <slots> --host <node>:<slots> -x <env-variable> -wdir <workdir> <bart> -p [-s dim0 ... dimN] [-e max0 ... maxN] [-S] tool : ...' - -n <slots>: specifies how many slots should be used, slots != processes - --host <node>:<slots>: ip address or better ssh-alias of the node; <slots> specifies how many slots are at most available, has to correspond with '-n' - -x <env-variable>: if needed, set an environment variable (e.g.: useful for DEBUG_LEVEL) - -wdir <workdir>: specify the working directory, could be different on each node - <bart> has to be the absolute path to the bart executable on each node if it is not the same everywhere or in $PATH - : separates different node configurations (basically repeat everything for each node with adapted parameters; if they are the same, you don't have to separate them, just use --hostfile <hostfile>) - [Setup symbolic link]: ln -s <target> <link> - Set up a symbolic link to create the same file structure on all nodes to avoid the need for '-wdir' # Useful files - ~/.ssh/config: "Host <alias> User <user> HostName <ip> IdentityFile <identity-file>" - alternatively set up '/etc/hosts' to use names for nodes # Troubleshooting - Q: Nothing happens at all after running 'mpirun' A: Check the MPI versions on each node ('mpirun --version'), they should be the same or at least the same major version - Q: "No protocol specified": A: This is a warning/error from ssh. It means that no protocol for X is specified - Q: "ssh: connect to host <node> port 22: Connection refused" A: Most likely you have to install openssh-server on <node> - Q: "WARNING: Open MPI failed to TCP connect to a peer MPI process." A: Most likely you didn't set up ssh correctly, check each node connection again. You should be able to connect to each node and vice versa - Q: "/lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.33' /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.34'" - A: Most likely your nodes use different OSes (distributions) with different versions of GLIBC. Compile bart on each node separately. Use the absolute path to this binary in your call of 'mpirun'; different nodes can be separated by ':' [1] https://mpitutorial.com/ libbart-devel/doc/fft.txt000066400000000000000000000042311472525725500156750ustar00rootroot00000000000000 Centered FFT ------------ The center is N / 2 for even N and (N - 1) / 2 for odd N when counting from zero. Instead of using fftshift/ifftshift we usually use fftmod/ifftmod.
While fftshift/ifftshift involves a cyclic copy of memory, fftmod applies a linear phase which has a similar effect. We prefer fftmod because we can usually merge this phase with other operations to reduce computation time. Though similar, there are some subtle differences which one has to keep in mind. The correct use of fftshift/ifftshift for a centered forward/inverse FFT is the following: forward: 1. ifftshift 2. fft 3. fftshift inverse: 1. ifftshift 2. ifft 3. fftshift In contrast, the correct use of fftmod/ifftmod for a centered forward/inverse FFT is this: forward: 1. fftmod 2. fft 3. fftmod inverse: 1. ifftmod 2. ifft 3. ifftmod If \xi_N is the N-th root of unity with smallest positive complex argument, the uncentered forward DFT of length N is: \hat f(k) = \sum_{x=0}^{N-1} \xi_N^{-xk} f(x) Shifting the center from index 0 to new index c yields the formula for the centered forward DFT of length N: \hat f_c(k) = \sum_{x=0}^{N-1} \xi_N^{-(x-c)(k-c)} f_c(x) Note that this corresponds to shifts in different directions for input and output. Expanding the exponent yields: (x-c)(k-c) = xk - xc - ck + c^2 Thus, the centered FFT can be implemented by multiplication with a linear phase before and after calling the uncentered FFT: \hat f_c(k) = \xi_N^{(k-c/2)c} \sum_{x=0}^{N-1} \xi_N^{-xk} \xi_N^{(x-c/2)c} f_c(x) Observe that this is the same linear phase applied to the input and output. Note that we distributed the additional phase \xi_N^{-c^2} evenly to both terms. If N is a multiple of four, then c^2 = N (N/4) and the additional phase term vanishes. Then \xi_N^{kc} and \xi_N^{xc} are simply the alternating sequence of 1, -1, 1, -1, ... Because ifftmod applies the conjugate phase this implies that it is the same as fftmod in this special case. If N is a multiple of two, the additional phase is -pi/2. Evenly distributed this yields a factor of '-i' (i the imaginary number), i.e. fftmod applies -i, +i, -i, +i, ... For N odd the phase is more complicated. libbart-devel/doc/pics.txt000066400000000000000000000020721472525725500160550ustar00rootroot00000000000000 Returns the coil-combined images in the image domain. For Cartesian imaging, 3D k-space (kz along z dim) must be provided. By default, pics assumes a 3D reconstruction. For a slice-by-slice 2D reconstruction, pics can be called in a loop. If no scaling factor is provided, pics will scale the data prior to reconstruction. The scaling factor is calculated using the center k-space region. This scaling will be undone before returning if the -S flag is provided, which may be important for computing quantitative parameters. Additional dimensions such as coils, maps, and time must follow the dimension order specified in mri.h, otherwise regularizers may be applied on the wrong dimensions and the forward sense operator may be inaccurate. The sampling mask is determined automatically from the provided k-space. For the sampling mask to be calculated correctly, missing samples must be exactly zero. Small values in k-space, perhaps from numerical errors, will cause the sampling mask (reflected in the logged acceleration factor) and the data consistency step to be incorrect. libbart-devel/doc/random-numbers.txt000066400000000000000000000016441472525725500200540ustar00rootroot000000000000001. Generator Since commit 697e84f2d9 (April 2024), bart uses the Philox-4x32-10 pseudo-random number generator (PRNG) by default. 2. Interaction with the bart looping interface and MPI Care has to be taken when using random numbers in a parallel context (e.g.
by using bart -l, bart -p, or MPI with bart): By default, bart will produce reproducible random numbers regardless of the number of threads or MPI processes used. This means that the output is as if the bart command had been called on the entire input, without using the loop interface. If this behavior is undesirable, it can be changed or even deactivated for the loop interface by specifying '--random-dims <flags>', which will vary the random numbers over the dimensions selected by the <flags> argument. Note also that reproducibility of random numbers cannot be guaranteed if the loop dimensions specified with 'bart -l/-p' are not the last dimensions! libbart-devel/doc/references.txt000066400000000000000000000425471472525725500172510ustar00rootroot00000000000000 - BART - Uecker M, Ong F, Tamir JI, Bahri D, Virtue P, Cheng JY, Zhang T, Lustig M. Berkeley Advanced Reconstruction Toolbox. Annual Meeting ISMRM, Toronto 2015, In: Proc Intl Soc Mag Reson Med 2015; 23:2486. Uecker M, Virtue P, Ong F, Murphy MJ, Alley MT, Vasanawala SS, Lustig M. Software Toolbox and Programming Library for Compressed Sensing and Parallel Imaging, ISMRM Workshop on Data Sampling and Image Reconstruction, Sedona 2013. Tamir JI, Ong F, Cheng JY, Uecker M, Lustig M. Generalized Magnetic Resonance Image Reconstruction using The Berkeley Advanced Reconstruction Toolbox, ISMRM Workshop on Data Sampling and Image Reconstruction, Sedona 2016. Uecker M. Machine Learning Using the BART Toolbox - Implementation of a Deep Convolutional Neural Network for Denoising. Joint Annual Meeting ISMRM-ESMRMB, Paris 2018, In: Proc Intl Soc Mag Reson Med 2018; 26:2802. Blumenthal M and Uecker M. Deep Deep Learning with BART. ISMRM Annual Meeting 2021, In: Proc Intl Soc Mag Reson Med 2021; 29:1754. Luo G, Blumenthal M, Uecker M. Using data-driven image priors for image reconstruction with BART. ISMRM Annual Meeting 2021, In: Proc Intl Soc Mag Reson Med 2021; 29:1756. Holme HCM and Uecker M. Reproducibility meets Software Testing: Automatic Tests of Reproducible Publications Using BART. ISMRM Annual Meeting 2021, In: Proc Intl Soc Mag Reson Med 2021; 29:3768. Scholand N, Schilling M, Heide M, Uecker M. Digital Reference Objects with BART. In: Proc Intl Soc Mag Reson Med 2023; 31:3118. Blumenthal M, Holme HCM, Uecker M. Scaling nuFFT Memory-Overhead Down to Zero: Computational Trade-Offs and Memory-Efficient PICS-Reconstructions with BART. In: Proc Intl Soc Mag Reson Med 2023; 31:4947. - reproducible publications using BART - Uecker M, Lustig M. Estimating Absolute-Phase Maps Using ESPIRiT and Virtual Conjugate Coils. Magn Reson Med 2017; 77:1201-1207. https://github.com/mrirecon/vcc-espirit Rosenzweig S, Holme HCM, Wilke RN, Voit D, Frahm J, Uecker M. Simultaneous Multi-Slice Reconstruction Using Regularized Nonlinear Inversion: SMS-NLINV. Magn Reson Med 2018; 79:2057-2066. https://github.com/mrirecon/sms-nlinv Rosenzweig S, Holme HCM, Uecker M. Simple Auto-Calibrated Gradient Delay Estimation From Few Spokes Using Radial Intersections (RING). Magn Reson Med 2019; 81:1898-1906. https://github.com/mrirecon/ring Holme HCM, Rosenzweig S, Ong F, Wilke RN, Lustig M, Uecker M. ENLIVE: An Efficient Nonlinear Method for Calibrationless and Robust Parallel Imaging. Scientific Reports 2019; 9:3034. https://github.com/mrirecon/enlive Wang X, Kohler F, Unterberg-Buchwald C, Lotz J, Frahm J, Uecker M.
Model-based myocardial T1 mapping with sparsity constraints using single-shot inversion-recovery radial FLASH cardiovascular magnetic resonance. J Cardiovasc Magn Reson 2019; 21:60. https://github.com/mrirecon/myocardial-t1-mapping Rosenzweig S, Scholand N, Holme HCM, Uecker M. Cardiac and Respiratory Self-Gating in Radial MRI using an Adapted Singular Spectrum Analysis (SSA-FARY). IEEE Trans Med Imag 2020; 39:3029-3041. https://github.com/mrirecon/SSA-FARY Wang X, Rosenzweig S, Scholand N, Holme HCM, Uecker M. Model-based Reconstruction for Simultaneous Multi-slice T1 Mapping using Single-shot Inversion-recovery Radial FLASH. Magn Reson Med 2021; 85:1258-1271. https://github.com/mrirecon/sms-t1-mapping Wang X, Tan Z, Scholand N, Roeloffs V, Uecker M. Physics-based Reconstruction Methods for Magnetic Resonance Imaging. Philos. Trans. R. Soc. A. 2021; 379:20200196. https://github.com/mrirecon/physics-recon Wang X, Rosenzweig S, Roeloffs V, Blumenthal M, Scholand N, Tan Z, Holme HCM, Unterberg-Buchwald C, Hinkel R, Uecker M. Free-breathing myocardial T1 mapping using inversion-recovery radial FLASH and motion-resolved model-based reconstruction. Magn Reson Med 2023; 89:1368-1384. https://github.com/mrirecon/motion-resolved-myocardial-T1-mapping Scholand N, Wang X, Roeloffs V, Rosenzweig S, Uecker M. Quantitative MRI by nonlinear inversion of the Bloch equations. Magn Reson Med 2023; 90:520-538. https://github.com/mrirecon/bloch-moba - sensitivity-encoded parallel imaging - (commands: itsense, pocsense, bpsense, pics) Ra JB and Rim CY. Fast imaging using subencoding data sets from multiple detectors. Magn Reson Med 1993; 30:142-145. Pruessmann KP, Weiger M, Scheidegger MB, Boesiger P. SENSE: Sensitivity encoding for fast MRI. Magn Reson Med 1999; 42:952-962. Pruessmann KP, Weiger M, Boernert P, Boesiger P. Advances in sensitivity encoding with arbitrary k-space trajectories. Magn Reson Med 2001; 46:638-651. Samsonov AA, Kholmovski EG, Parker DL, Johnson CR. POCSENSE: POCS-based reconstruction for sensitivity encoded magnetic resonance imaging. Magn Reson Med 2004; 52:1397-1406. - implementation of the (non-uniform) FFT - (commands: fft, nufft, nufftbase, pics) O’Sullivan JD. A fast sinc function gridding algorithm for Fourier inversion in computer tomography. IEEE Trans Med Imaging 1985; 4:200-207. Jackson JI, Meyer CH, Nishimura DG, Macovski A. Selection of a convolution function for Fourier inversion using gridding. IEEE Trans Med Imaging 1991; 3:473-478. Wajer F and Pruessmann KP. Major speedup of reconstruction for sensitivity encoding with arbitrary trajectories. Annual Meeting of the ISMRM, Glasgow 2001, In: Proc Intl Soc Mag Reson Med 2001; 9:767. Frigo M, Johnson SG. The Design and Implementation of FFTW3. Proc IEEE 2005; 93:216-231. Uecker M, Zhang S, Frahm J. Nonlinear Inverse Reconstruction for Real-time MRI of the Human Heart Using Undersampled Radial FLASH. Magn Reson Med 2010; 63:1456-1462. Ong F, Uecker M, Jiang W, Lustig M. Fast Non-Cartesian Reconstruction with Pruned Fast Fourier Transform. Annual Meeting ISMRM, Toronto 2015, In: Proc Intl Soc Mag Reson Med 2015; 23:3639. Ong F, Uecker M, Lustig M. Accelerating non-Cartesian MRI reconstruction convergence using k-space preconditioning. IEEE Trans Med Imag 2019; 39:1646-1654. Blumenthal M, Holme HCM, Uecker M. Scaling nuFFT Memory-Overhead Down to Zero: Computational Trade-Offs and Memory-Efficient PICS-Reconstructions with BART. In: Proc Intl Soc Mag Reson Med 2023; 31:4947.
- methods for sensitivity calibration - (commands: walsh, caldir, ecalib, ecaltwo) Walsh DO, Gmitro AF, Marcellin MW. Adaptive reconstruction of phased array MR imagery. Magn Reson Med 2000; 43:682-690. Griswold M, Walsh D, Heidemann R, Haase A, Jakob A. The Use of an Adaptive Reconstruction for Array Coil Sensitivity Mapping and Intensity Normalization. Annual Meeting ISMRM, Honolulu 2002, In: Proc Intl Soc Mag Reson Med 2002; 10:2410. McKenzie CA, Yeh EN, Ohliger MA, Price MD, Sodickson DK. Self-calibrating parallel imaging with automatic coil sensitivity extraction. Magn Reson Med 2002; 47:529-538. Uecker M, Virtue P, Vasanawala SS, Lustig M. ESPIRiT Reconstruction Using Soft SENSE. Annual Meeting ISMRM, Salt Lake City 2013, In: Proc Intl Soc Mag Reson Med 2013; 21:127. Uecker M, Lai P, Murphy MJ, Virtue P, Elad M, Pauly JM, Vasanawala SS, Lustig M. ESPIRiT - An Eigenvalue Approach to Autocalibrating Parallel MRI: Where SENSE meets GRAPPA. Magn Reson Med 2014; 71:990-1001. - joint estimation: nonlinear inversion, calibrationless - (commands: nlinv, sake) Uecker M, Hohage T, Block KT, Frahm J. Image reconstruction by regularized nonlinear inversion-joint estimation of coil sensitivities and image content. Magn Reson Med 2008; 60:674-682. Bi Z, Uecker M, Jiang D, Lustig M, Ying K. Robust Low-rank Matrix Completion for sparse motion correction in auto calibration PI. Annual Meeting ISMRM, Salt Lake City 2013, In: Proc Intl Soc Mag Reson Med 2013; 21:2584. Shin PJ, Larson PEZ, Ohliger MA, Elad M, Pauly JM, Vigneron DB, Lustig M. Calibrationless Parallel Imaging Reconstruction Based on Structured Low-Rank Matrix Completion. Magn Reson Med 2014; 72:959-970. Holme HCM, Rosenzweig S, Ong F, Wilke RN, Lustig M, Uecker M. ENLIVE: An Efficient Nonlinear Method for Calibrationless and Robust Parallel Imaging. Scientific Reports 2019; 9:3034. - coil compression - (command: cc) Buehrer M, Pruessmann KP, Boesiger P, Kozerke S. Array compression for MRI with large coil arrays. Magn Reson Med 2007; 57:1131-1139. Huang F, Vijayakumar S, Li Y, Hertel S, Duensing GR. A software channel compression technique for faster reconstruction with many channels. Magn Reson Imaging 2008; 26:133-141. Zhang T, Pauly JM, Vasanawala SS, Lustig M. Coil compression for accelerated imaging with Cartesian sampling. Magn Reson Med 2013; 69:571-582. Bahri D, Uecker M, Lustig M. ESPIRiT-Based Coil Compression for Cartesian Sampling. Annual Meeting ISMRM, Salt Lake City 2013, In: Proc Intl Soc Mag Reson Med 2013; 21:2657. Kim D, Cauley SF, Nayak KS, Leahy RM, Haldar JP. Region-optimized virtual (ROVir) coils: Localization and/or suppression of spatial regions using sensor-domain beamforming. Magn Reson Med 2021; 86:197-212. - compressed sensing MRI - (commands: pocsense, pics) Block KT, Uecker M, Frahm J. Undersampled radial MRI with multiple coils. Iterative image reconstruction using a total variation constraint. Magn Reson Med 2007; 57:1086-1098. Lustig M, Donoho D, Pauly JM. Sparse MRI: The application of compressed sensing for rapid MR imaging. Magn Reson Med 2007; 58:1182-1195. Liu B, King K, Steckner M, Xie J, Sheng J, Ying L. Regularized sensitivity encoding (SENSE) reconstruction using Bregman iterations. Magn Reson Med 2009; 61:145-152. - non-linear model-based reconstruction - (commands: moba) Tong CY, Prato FS. A Novel Fast T1-Mapping Method. Magn Reson Imaging 1994; 4:701-708. McKenzie CA, Pereira RS, Prato FS, Chen Z, Drost DJ. Improved Contrast Agent Bolus Tracking Using T1 FARM. Magn Reson Med 1999; 41:429-435.
Graff C, Li Z, Bilgin A, Altbach MI, Gmitro AF, Clarkson EW. Iterative T2 estimation from highly undersampled radial fast spin-echo data. ISMRM 2006; 14:925. Olafsson VT, Noll DC, Fessler JA. Fast joint reconstruction of dynamic and field maps in functional MRI. IEEE Trans Med Imag 2008; 27:1177-1188. Block KT, Uecker M, Frahm J. Model-Based Iterative Reconstruction for Radial Fast Spin-Echo MRI. IEEE Trans Med Imag 2009; 28:1759-1769. Wang X, Roeloffs V, Klosowski J, Tan Z, Voit D, Uecker M, Frahm J. Model-based T1 Mapping with Sparsity Constraints Using Single-Shot Inversion-Recovery Radial FLASH. Magn Reson Med 2018; 79:730-740. Wang X, Kohler F, Unterberg-Buchwald C, Lotz J, Frahm J, Uecker M. Model-based myocardial T1 mapping with sparsity constraints using single-shot inversion-recovery radial FLASH cardiovascular magnetic resonance. J Cardiovasc Magn Reson 2019; 21:60. Tan Z, Voit D, Kollmeier J, Uecker M, Frahm J. Dynamic Water/Fat Separation and B0 Inhomogeneity Mapping - Joint Estimation Using Undersampled Triple-Echo Multi-Spoke Radial FLASH. Magn Reson Med 2019; 82:1000-1011. Wang X, Rosenzweig S, Scholand N, Holme HCM, Uecker M. Model-based Reconstruction for Simultaneous Multi-slice T1 Mapping using Single-shot Inversion-recovery Radial FLASH. Magn Reson Med 2021; 85:1258-1271. Wang X, Tan Z, Scholand N, Roeloffs V, Uecker M. Physics-based Reconstruction Methods for Magnetic Resonance Imaging. Philos. Trans. R. Soc. A. 2021; 379:20200196. Tan Z, Unterberg-Buchwald C, Blumenthal M, Scholand N, Schaten P, Holme HCM, Wang X, Raddatz D, Uecker M. Free-Breathing Liver Fat, R2* and B0 Field Mapping Using Multi-Echo Radial FLASH and Regularized Model-based Reconstruction. IEEE Trans Med Imag 2023; 42:1374-1387. Wang X, Rosenzweig S, Roeloffs V, Blumenthal M, Scholand N, Tan Z, Holme HCM, Unterberg-Buchwald C, Hinkel R, Uecker M. Free-breathing myocardial T1 mapping using inversion-recovery radial FLASH and motion-resolved model-based reconstruction. Magn Reson Med 2023; 89:1368-1384. Scholand N, Wang X, Roeloffs V, Rosenzweig S, Uecker M. Quantitative MRI by nonlinear inversion of the Bloch equations. Magn Reson Med 2023; 90:520-538. - subspace reconstruction - (commands: pics, signal, svd) Liang Z. Spatiotemporal Imaging with partially separable functions. 4th IEEE International Symposium on Biomedical Imaging: From Nano to Macro, 2007; 988-991. Petzschner FH, Ponce IP, Blaimer M, Jakob PM, Breuer FA. Fast MR parameter mapping using k-t principal component analysis. Magn Reson Med 2011; 66:706-716. Mani M, Jacob M, Magnotta V, Zhong J. Fast iterative algorithm for the reconstruction of multishot non-cartesian diffusion data. Magn Reson Med 2015; 74:1086-1094. Tamir JI, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M. T2 shuffling: Sharp, multicontrast, volumetric fast spin-echo imaging. Magn Reson Med 2017; 77:180-195. - sparsity transforms, variational penalties, regularization - (commands: cdf97, rof, tgv, lrmatrix, pocsense, pics) Rudin LI, Osher S, Fatemi E. Nonlinear total variation based noise removal algorithms, Physica D: Nonlinear Phenomena 1992; 60:259-268. Figueiredo MAT and Nowak RD. An EM algorithm for wavelet-based image restoration. IEEE Trans Image Process 2003; 12:906-916. Ong F, Uecker M, Tariq U, Hsiao A, Alley MT, Vasanawala SS, Lustig M. Robust 4D Flow Denoising using Divergence-free Wavelet Transform, Magn Reson Med 2015; 73:828-842. Ong F, Lustig M.
Beyond low rank + sparse: Multi-scale low rank matrix decomposition, IEEE J Sel Topics Signal Process 2016; 10:672-687. Bredies K, Kunisch K, Pock T. Total generalized variation. SIAM Journal on Imaging Sciences 2010; 3:492-526. Luo G, Zhao N, Jiang W, Hui ES, Cao P. MRI reconstruction using deep Bayesian estimation. Magn Reson Med 2020; 84:2246-2261. Knoll F, Bredies K, Pock T, Stollberger R. Second order total generalized variation (TGV) for MRI. Magn Reson Med 2010; 65:480-491. - sampling schemes - (commands: traj, poisson, wave, wavepsf) Winkelmann S, Schaeffter T, Koehler T, Eggers H, Doessel O. An optimal radial profile order based on the Golden Ratio for time-resolved MRI. IEEE Trans Med Imaging 2007; 26:68-76. Lustig M, Alley M, Vasanawala S, Donoho DL, Pauly JM. L1 SPIR-iT: Autocalibrating Parallel Imaging Compressed Sensing. Annual Meeting ISMRM, Honolulu 2009, In: Proc Intl Soc Mag Reson Med 2009; 17:379. Bilgic B, Gagoski BA, Cauley SF, Fan AP, Polimeni JR, Grant PE, Wald LL, Setsompop K. Wave-CAIPI for highly accelerated 3D imaging. Magn Reson Med 2014; 73:2152-2162. Wundrak S, Paul J, Ulrici J, Hell E, Geibel M-A, Bernhardt P, Rottbauer W, Rasche V. Golden ratio sparse MRI using tiny golden angles. Magn Reson Med 2016; 75:2372-2378. - trajectory correction - (commands: estdelay) Block KT, Uecker M. Simple Method for Adaptive Gradient-Delay Compensation in Radial MRI. Annual Meeting ISMRM, Montreal 2011, In: Proc Intl Soc Mag Reson Med 2011; 19:2816. Rosenzweig S, Holme HCM, Uecker M. Simple Auto-Calibrated Gradient Delay Estimation From Few Spokes Using Radial Intersections (RING). Magn Reson Med 2019; 81:1898-1906. - acceleration with graphical processing units - (commands: pocsense, nufft, pics, nlinv) Uecker M, Zhang S, Frahm J. Nonlinear Inverse Reconstruction for Real-time MRI of the Human Heart Using Undersampled Radial FLASH. Magn Reson Med 2010; 63:1456-1462. Murphy M, Alley M, Demmel J, Keutzer K, Vasanawala S, Lustig M. Fast ℓ1-SPIRiT Compressed Sensing Parallel Imaging MRI: Scalable Parallel Implementation and Clinically Feasible Runtime. IEEE Trans Med Imaging 2012; 31:1250-1262. - numerical phantoms and signals - (commands: phantom, signal) Shepp LA, Logan BF. The Fourier reconstruction of a head section. IEEE T Nucl Sci 1974; 21:21-43. Koay CG, Sarlls JE, Özarslan E. Three-Dimensional Analytical Magnetic Resonance Imaging Phantom in the Fourier Domain. Magn Reson Med 2007; 58:430-436. Guerquin-Kern M, Lejeune L, Pruessmann KP, Unser M. Realistic Analytical Phantoms for Parallel Magnetic Resonance Imaging. IEEE Trans Med Imaging 2012; 31:626-636. Look DC, Locker DR. Time Saving in Measurement of NMR and EPR Relaxation Times. Review of Scientific Instruments 1970; 41:250. Schmitt P, Griswold MA, Jakob PM, Kotas M, Gulani V, Flentje M, Haase A. Inversion recovery TrueFISP: Quantification of T1, T2, and spin density. Magn Reson Med 2004; 51:661-667. Hamilton G, Yokoo T, Bydder M, Cruite I, Schroeder ME, Sirlin CB, Middleton MS. In vivo characterization of the liver fat 1H MR spectrum. NMR Biomed 2011; 24:784-790. Assländer J, Novikov DS, Lattanzi R, Sodickson DK, Cloos MA. Hybrid-state free precession in nuclear magnetic resonance. Communications Physics 2019; 2:73. Scholand N, Schilling M, Heide M, Uecker M. Digital Reference Objects with BART. In: Proc Intl Soc Mag Reson Med 2023; 31:3118. - machine learning - (command: mnist, nnet, reconet) Hammernik K, Klatzer T, Kobler E, Recht MP, Sodickson DK, Pock T, Knoll F.
Learning a variational network for reconstruction of accelerated MRI data. Magn Reson Med 2018; 79:3055-3071. Aggarwal HK, Mani MP, Jacob M. MoDL: Model-Based Deep Learning Architecture for Inverse Problems. IEEE Trans Med Imaging 2019; 38:394-405. Pock T and Sabach S. Inertial Proximal Alternating Linearized Minimization (iPALM) for Nonconvex and Nonsmooth Problems. SIAM J Imaging Sci 2016; 9:1756-1787. Kingma DP and Ba J. Adam: A Method for Stochastic Optimization. arXiv preprint 2014, arXiv:1412.6980 - random numbers - (command: noise) and internally in others Salmon JK, Moraes MA, Dror RO, Shaw DE. Parallel random numbers: as easy as 1, 2, 3. SC '11: Proceedings of 2011 International Conference for High Performance Computing, Networking, Storage and Analysis; DOI: 10.1145/2063384.2063405 Lemire D. Fast Random Integer Generation in an Interval. arXiv preprint 2018, arXiv:1805.10941 libbart-devel/doc/resize.txt000066400000000000000000000007371472525725500164220ustar00rootroot00000000000000bart resize help ------------------- If the input looks like this: [[1 2 3] [4 5 6]] bart resize -c 0 6 1 5 input output [[0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.] [0. 1. 2. 3. 0.] [0. 4. 5. 6. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.]] bart resize 0 6 1 5 input output [[1. 2. 3. 0. 0.] [4. 5. 6. 0. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.] [0. 0. 0. 0. 0.]] bart resize -c 0 1 1 2 input output [[4. 5.]] bart resize 0 1 1 2 input output [[1. 2.]] libbart-devel/doc/singularity/000077500000000000000000000000001472525725500167275ustar00rootroot00000000000000libbart-devel/doc/singularity/README.md000066400000000000000000000025371472525725500202130ustar00rootroot00000000000000# Singularity Container for BART Running BART on a high performance computing (HPC) cluster can be challenging due to missing libraries. One solution exploits containers. They encapsulate the software with all required dependencies. Here, we provide some basic information about how to run BART in a container using [Singularity](https://sylabs.io/singularity/). A blueprint to create a singularity container for BART can be found in the definition files * [`bart_ubuntu.def`](bart_ubuntu.def): for an Ubuntu 22.04 operating system. * [`bart_debian.def`](bart_debian.def): for a Debian 12 (bookworm) operating system. After installing singularity, a container `container.sif` can be created in the Singularity Image Format (SIF): ```code sudo singularity build container.sif bart.def ``` Both containers download and compile BART with all libraries including GPU support using CUDA. Make sure to select the CUDA version your local HPC host provides. You can start an interactive shell with ```code singularity shell --nv container.sif ``` and the `--nv` flag adds access to the installed Nvidia drivers on the host system. A BASH script can be executed inside the container with ```code singularity exec --nv container.sif bash 'recon.sh' ``` ### Note The definition files above also represent simple guides on how to install BART on the individual operating systems. libbart-devel/doc/singularity/bart_debian.def000066400000000000000000000035311472525725500216430ustar00rootroot00000000000000Bootstrap: docker From: debian:12 %labels Author Nick Scholand Version debian-v1.0 %help This is a container running debian bookworm (12) with the Berkeley Advanced Reconstruction Toolbox (BART) and its viewer tool. It deploys the code of a specified version and compiles it with GPU support using CUDA. Ensure to select the same CUDA version as installed on your host.
To compile the specified BART version without CUDA support remove `CUDA=1` from the `printf` string below. %environment export BART_TOOLBOX_PATH=/bart export TOOLBOX_PATH=/bart export LD_LIBRARY_PATH=/bart/lib:$LD_LIBRARY_PATH export PATH=/bart:$PATH export PATH=/view:$PATH %post # Allow non-free packages sed -i -e 's/Components: main/Components: main contrib non-free non-free-firmware/g' /etc/apt/sources.list.d/debian.sources apt-get update # Install general libraries for BART and BART's viewer tool apt-get install -y libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev libgtk-3-dev # Install useful tools for the installation process and workflows apt-get install -y make gcc git wget vim dpkg unzip screen time bc # Install CUDA CUDA_VERSION=12-3 wget https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/cuda-keyring_1.1-1_all.deb dpkg -i cuda-keyring_1.1-1_all.deb apt-get update DEBIAN_FRONTEND=noninteractive apt-get -y install cuda-${CUDA_VERSION} # `DEBIAN_FRONTEND=noninteractive` avoids keyboard pop-up # Install BART and compile it with GPU support BART_VERSION=0.9.00 wget https://github.com/mrirecon/bart/archive/v${BART_VERSION}.zip unzip v${BART_VERSION}.zip mv bart-${BART_VERSION} bart rm v${BART_VERSION}.zip cd bart touch Makefiles/Makefile.local printf "PARALLEL=1\nCUDA=1\nCUDA_BASE=/usr/local/cuda\nCUDA_LIB=lib64\n" > Makefiles/Makefile.local make cd .. # Install BART's viewer tool apt-get -y install bart-view libbart-devel/doc/singularity/bart_ubuntu.def000066400000000000000000000032771472525725500217520ustar00rootroot00000000000000Bootstrap: library From: ubuntu:22.04 %labels Author Nick Scholand Version ubuntu-v1.0 %help This is a container running Ubuntu 22.04 with the Berkeley Advanced Reconstruction Toolbox (BART) and its viewer tool. It deploys the code of a specified version and compiles it with GPU support using CUDA. Ensure to select the same CUDA version as installed on your host. To compile the specified BART version without CUDA support remove `CUDA=1` from the `printf` string below. %environment export BART_TOOLBOX_PATH=/bart export TOOLBOX_PATH=/bart export LD_LIBRARY_PATH=/bart/lib:$LD_LIBRARY_PATH export PATH=/bart:$PATH export PATH=/view:$PATH %post apt-get update # Install general libraries for BART and BART's viewer tool apt-get install -y libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev libgtk-3-dev # Install useful tools for the installation process and workflows apt-get install -y make gcc git wget vim dpkg unzip screen time bc # Install CUDA CUDA_VERSION=12-0 wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb dpkg -i cuda-keyring_1.1-1_all.deb apt-get update DEBIAN_FRONTEND=noninteractive apt-get -y install cuda-${CUDA_VERSION} # `DEBIAN_FRONTEND=noninteractive` avoids keyboard pop-up # Install BART and compile it with GPU support BART_VERSION=0.9.00 wget https://github.com/mrirecon/bart/archive/v${BART_VERSION}.zip unzip v${BART_VERSION}.zip mv bart-${BART_VERSION} bart rm v${BART_VERSION}.zip cd bart touch Makefiles/Makefile.local printf "PARALLEL=1\nCUDA=1\nCUDA_BASE=/usr/local/cuda\nCUDA_LIB=lib64\n" > Makefiles/Makefile.local make cd .. # Install BART's viewer tool apt-get -y install bart-view libbart-devel/doc/style.txt000066400000000000000000000117161472525725500162640ustar00rootroot00000000000000 1. Language The main language is C11 with commonly used GNU extensions (gnu11) as supported by the GCC and clang compilers. 1.1. 
Standard types: The complex types are the standard types introduced with ISO C99. #include <complex.h> complex float complex double In headers we use _Complex without including the standard header for complex types for compatibility with C++. Similarly, we use the standard boolean type. #include <stdbool.h> bool x = true; 1.2. Arrays Large multi-dimensional arrays should use our md_* functions. Small arrays should use (variable-length) arrays to increase type-safety. Pointer arithmetic should be avoided. float kernel[N][3]; complex float* image = md_alloc(N, dims, CFL_SIZE); In headers, we use the __VLA(x) macro for compatibility with C++ when this is possible. 1.3. GNU Extensions: Some extensions are commonly supported by compilers and useful. statement expressions ({ }) __typeof__ const array parameters 1.4. Type safety void* and casts should be used only when necessary. Functions must have a prototype. Variable-length arrays are preferred over basic pointers. Macros can often be made type-safe, e.g. using the TYPE_CHECK macro. structs should be defined in the source (.c) file whenever possible to enforce modularization ("opaque pointers"). 2. Coding Style Coding styles are meant as guidelines. It is OK to deviate from the guidelines in situations where it helps to make the code easier to understand. 2.1. Indentation Indentation uses a single tab. A tab is considered 8 characters. White space errors (white space after the last visible character of a line) should be avoided. Labels should be indented one tab less than the actual code. pragmas should start at the beginning of a line. The "omp" in OpenMP pragmas should be aligned to the affected statement by use of tabs after the initial "pragma". Lines should not exceed 80 to 100 characters. 2.2. Expressions There should be no space after the opening or before the closing bracket. There should be a single space before and after any operator except for prefix and postfix operators. Subexpressions should be enclosed in brackets and not rely on operator precedence for correct evaluation order. int i = (3 + x++) * 2; If there is a constant involved in a comparison the constant should be on the left side. if (0. == x) return false; The type of the controlling expression used in if statements or loops should be boolean. Pointers and integers should not be implicitly compared to NULL or zero. if (NULL == foo_ptr) foo_ptr = init_foo_ptr(); 2.3. Statement groups. Opening curly brace is on the next line for functions and on the same line for if, for, while, and switch statements. In the latter case there should be an empty line afterwards. In case only a single statement follows an if, for, or while statement, the statement block can be omitted - but for if-else pairs only if it can be omitted on both sides. There should be no empty line before the closing bracket. if (0. == x) { ... } if (0. == x) y = 3; Statements should be grouped in logical blocks by a single empty line. In particular, declarations, definitions (with initialization) should be separated from other statements. Memory allocation and deallocation should usually be separated. Multiple declarators in a single declaration should be avoided. void compute_bar(complex float* bar) { complex float* foo = md_alloc(); compute_foo(foo); compute_bar_from_foo(bar, foo); md_free(foo); } 2.4. Comments: Comments should be used to document the API, explain complicated code and algorithms, and give required background information. Comments are not meant to explain things a competent programmer should know by reading the code.
2.2. Expressions

There should be no space after the opening or before the closing
bracket. There should be a single space before and after any operator,
except for prefix and postfix operators. Subexpressions should be
enclosed in brackets and not rely on operator precedence for correct
evaluation order.

	int i = (3 + x++) * 2;

If a constant is involved in a comparison, the constant should be
on the left side.

	if (0. == x)
		return false;

The type of the controlling expression used in if statements or loops
should be boolean. Pointers and integers should not be implicitly
compared to NULL or zero.

	if (NULL == foo_ptr)
		foo_ptr = init_foo_ptr();

2.3. Statement groups

The opening curly brace is on the next line for functions and on the
same line for if, for, while, and switch statements. In the latter
case, there should be an empty line afterwards. In case only a single
statement follows an if, for, or while statement, the statement block
can be omitted - but for if-else pairs only if it can be omitted on
both sides. There should be no empty line before the closing bracket.

	if (0. == x) {

		...
	}

	if (0. == x)
		y = 3;

Statements should be grouped in logical blocks by a single empty line.
In particular, declarations and definitions (with initialization)
should be separated from other statements. Memory allocation and
deallocation should usually be separated. Multiple declarators in a
single declaration should be avoided.

	void compute_bar(complex float* bar)
	{
		complex float* foo = md_alloc();

		compute_foo(foo);
		compute_bar_from_foo(bar, foo);

		md_free(foo);
	}

2.4. Comments:

Comments should be used to document the API, explain complicated code
and algorithms, and give required background information. Comments are
not meant to explain things a competent programmer should know by
reading the code.

Good:

	// gram-schmidt algorithm
	for (...) {

		for (..) {
			...
		}
	}

Bad:

	// initialize foo
	int foo = 3;

	// config struct
	struct foo_conf_s conf = ...

	// allocate memory
	complex float* x = md_alloc(...);

2.5. Variable and Function Names

Function and variable names should be lower case and use '_' as
separator of components. Names should be meaningful but not
unnecessarily long. If possible, use self-explanatory variable names -
except for loop indices, where one-letter variable names can be used.

	float norm = calc_frobenius_norm(image);

This is preferable to adding comments:

	// calculate frobenius norm
	float n = clc_frbn(i);

On the other hand, a short name is appropriate for often-used
functions. For example, we use md_fmac() instead of
multidim_fused_multiply_accumulate().

Locally used loop indices can be single-character letters: i, j, k

2.6. Includes

System headers should be included first, followed by headers from
other modules, headers from the same module, and finally the header
belonging to the current file.

Include guards should use the style __NAME_H, where NAME is the
basename of the header file.
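A short sketch of both rules, using a made-up module foo modeled on
the layout of the BART source tree:

	/* num/foo.h */
	#ifndef __FOO_H
	#define __FOO_H

	extern void foo_run(int n);

	#endif // __FOO_H

	/* num/foo.c */
	#include <math.h>		// system headers first
	#include <complex.h>

	#include "misc/misc.h"		// headers from other modules

	#include "num/multind.h"	// headers from the same module

	#include "foo.h"		// finally, the header of this file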
libbart-devel/doc/tensorflow.txt000066400000000000000000000057311472525725500173260ustar00rootroot00000000000000
TensorFlow graphs can be wrapped in an nlop (non-linear operator) to
be called in BART. TensorFlow v1 graphs and TensorFlow v2 SavedModels
are supported. In python/bart_tf.py, we provide wrapping functions to
export TensorFlow graphs.

1. Tensor data types and shapes

nlops work with single-precision complex floats. From the TensorFlow
side, we support the tf.complex64 and tf.float32 data types. Tensors
with data type tf.float32 must have size 2 in the last dimension,
stacking real and imaginary part. BART will create an nlop ignoring
this dimension.

BART uses Fortran ordering for dimensions. Hence, the nlop will have
flipped dimensions compared to the TensorFlow graph.

2. Naming Conventions

The inputs of the TensorFlow graph should be named "input_0",
"input_1", ... The outputs of the TensorFlow graph should be named
"output_0", "output_1", ...

TensorFlow v1 graphs are exported respecting the names for
inputs/outputs which are assigned by the user. TensorFlow v2
SavedModels assign new names to the saved inputs/outputs, usually of
the form:

	input_0  -> serving_default_input_0
	output_0 -> StatefulPartitionedCall

The names can be inspected using the "saved_model_cli" tool provided
by TensorFlow.

To provide the mapping to BART, include a file named "bart_config.dat"
in the SavedModel directory containing the mapping in the following
structure:

	# ArgumentNameMapping
	serving_default
	input_0 serving_default_input_0 0
	grad_ys_0 serving_default_grad_ys_0 0
	grad_0_0 StatefulPartitionedCall 0
	output_0 StatefulPartitionedCall 1

Here, "serving_default" is the signature and the integer in the last
column is the index of the operation.

3. Automatic Differentiation

We provide three methods to use TensorFlow automatic differentiation
in BART:

3.1 Backpropagation

For each output o, provide an input "grad_ys_o", and for each
combination of output o and input i, provide the gradient with the
name "grad_i_o". The shape of "grad_ys_o" must equal the shape of
"output_o", and the shape of "grad_i_o" must equal the shape of
"input_i".

3.2 Jacobian

The forward path can directly output the complete Jacobian matrix.
For this, all dimensions of the input and output should either be
equal or one of them must be one. We assume that the operator, and
hence the Jacobian, is block diagonal.

3.2.1 Holomorphic Functions

The output of the Jacobian should be named "jacobian_0_0" (only one
input and one output is supported). Please note that the Jacobian
computed by TensorFlow (tape.jacobian) is the complex conjugate of
the actual Jacobian!

3.2.2 Non-Holomorphic Functions

For non-holomorphic functions, the Jacobian of the real-valued
function can be provided with the name "jacobian_real_0_0". This
Jacobian should have (TensorFlow) dimensions [ ... , 2, 2 ], where
the 2x2 matrix contains the real-valued derivatives:

	[ ... , 0, 0 ]: d real / d real
	[ ... , 0, 1 ]: d real / d imag
	[ ... , 1, 0 ]: d imag / d real
	[ ... , 1, 1 ]: d imag / d imag
libbart-devel/doc/webasm.txt000066400000000000000000000111231472525725500163720ustar00rootroot00000000000000A short guide on how I compiled BART to WebAssembly:

1. Download (tested versions):
- emscripten (3.1.41)
- OpenBLAS source (0.3.23)
- libfftw3 source (3.3.10)
- BlocksRuntime source (standalone from git)
- bart source

2. Prepare:
- use emsdk to install and activate the latest emscripten.
- create a folder for the webasm libraries, e.g.: mkdir $HOME/wasm_libs

3. Compile OpenBLAS:
- compiling this is a bit troublesome; for me it only works on a Linux system and not on Windows with WSL.
- a few months ago OpenBLAS got support for emscripten, so just use this make command to compile:
make CC=emcc HOSTCC=gcc TARGET=RISCV64_GENERIC NOFORTRAN=1 USE_THREAD=0
- install the results outside of the standard places in /usr/ ! e.g. somewhere in your home directory:
make CC=emcc HOSTCC=gcc TARGET=RISCV64_GENERIC NOFORTRAN=1 USE_THREAD=0 PREFIX=$HOME/wasm_libs install

4. Compile libfftw3
- again: set the install prefix outside of /usr/ !
- enable float and disable fortran:
emconfigure ./configure --prefix=$HOME/wasm_libs --disable-fortran --enable-float
emmake make
make install

5. Compile BlocksRuntime
CC=emcc AR=emar RANLIB=emranlib ./buildlib
env DESTDIR=$HOME/wasm_libs ./installlib

6. Compile bart
- create a Makefile.local in the folder Makefiles
- its content should be this:
"""
FFTW_BASE=$(HOME)/wasm_libs
BLAS_BASE=$(HOME)/wasm_libs
CC=emcc
CFLAGS+=-fblocks
OPENBLAS=1
FORTRAN=0
FFTWTHREADS=0
USE_THREAD=1
SHARED=1
PNG=0
OMP=0
TENSORFLOW=0
DEBUG=0
MKL=0
ACML=0
CUDA=0
CUDNN=0
ISMRMRD=0
"""
- don't add libBlocksRuntime.a to LDFLAGS! clang will crash.
- then compile; no need for emmake, just make:
make
- Troubleshooting: the compilation will fail if /usr/include is included (-I/usr//include).
- Check that you have set all base directories outside of /usr or disabled them (e.g. ACML)

7. Include into your WebAssembly project
- copy the bart.o file to where you want it
- add it to the emcc call as an input file, e.g.:
emcc -O3 -Wall bart.o -s EXPORTED_FUNCTIONS="['__Block_object_dispose','_malloc','_free', \
'_calc_phantom', '_calc_bart', '_calc_circ', '_fftc','_ifftc', '_num_init', '_pha_opts_defaults', \
'_memcfl_create', '_load_cfl', '_main_ecalib', '_main_pics', '_main_phantom', '_main_fft']" \
-s ALLOW_MEMORY_GROWTH=1 -s MAXIMUM_MEMORY=4GB -o ./web/wwwroot/bart.js $HOME/wasm_libs/lib/libfftw3f.a \
$HOME/wasm_libs/lib/libopenblas.a $HOME/wasm_libs/usr/local/lib/libBlocksRuntime.a
- the script build_webasm.sh builds the files needed for the included web examples
- it is important to include all of libfftw3f.a, libopenblas.a and also libBlocksRuntime.a (here clang doesn't crash)
- all functions and variables you want to access in the JS files have to be exported (see the C sketch after this list).
- you definitely have to include '__Block_object_dispose' even if you don't call this function in your JS code!
- to allocate and free memory, include '_malloc' and '_free'
- In the example given above, all functions needed for the small web example are exported; the second line contains names to call the bart functions directly, the third line uses the command line functions.
- The "_" in front of the name is mandatory. The C function calc_phantom is called "_calc_phantom" in JS.
- MAXIMUM_MEMORY=4GB raises the amount of RAM the browser is allowed to use; ALLOW_MEMORY_GROWTH=1 allows dynamic allocation.
- if you want to call a JS function from C, use -s EXPORTED_RUNTIME_METHODS="['addFunction', 'removeFunction']" -s RESERVED_FUNCTION_POINTERS=2
- Module.addFunction adds a callback and returns the C pointer.
- set the reserved function pointers to the number of functions you need at the same time.
- This results in two files: a.out.js and a.out.wasm (the names can be changed with -o filename.js, but the .js at the end is important)
- Both have to be in the same directory on the web server
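As a sketch of what the C side of such an export can look like
(hypothetical example, not part of the BART sources): the
EMSCRIPTEN_KEEPALIVE attribute from emscripten.h is an alternative to
listing a symbol in EXPORTED_FUNCTIONS, and a callback created with
Module.addFunction arrives in C as a plain function pointer:

	#include <emscripten.h>

	// callable from JS as Module._scale_sum (note the added "_")
	EMSCRIPTEN_KEEPALIVE
	float scale_sum(const float* x, int n, float s)
	{
		float sum = 0.f;

		for (int i = 0; i < n; i++)
			sum += s * x[i];

		return sum;
	}

	// a pointer returned by Module.addFunction(f, 'vi') on the
	// JS side can be passed here as an ordinary integer argument
	typedef void (*progress_cb)(int);

	EMSCRIPTEN_KEEPALIVE
	void run_with_progress(int steps, progress_cb cb)
	{
		for (int i = 0; i < steps; i++)
			cb(i);
	}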
====== Embedding in JupyterLite:

- Install the JupyterLite CLI: python -m pip install jupyterlite-core
- Install the Pyodide kernel: jupyterlite-pyodide-kernel
- Create a folder, e.g.: mkdir lite
- jupyter lite init --output-dir demo --port 8000
- Add a folder for python packages: mkdir lite/pypi
- Build the BART python package: from bart/pkg/python, python3 -m build --wheel
- Copy the wheel: e.g. cp bart/pkg/python/dist/bart-0.0.1-py3-none-any.whl lite/pypi
- Build bart wasm: bart/build_webasm_main.sh
- Copy files to web: cp bart/web/wwwroot/bart_main.{js,wasm} /bart/web/wwwroot/bart_{base,worker}.js lite/demo/extensions/@jupyterlite/pyodide-kernel-extension/static/
- jupyter lite build

A plain Pyodide REPL works similarly.
libbart-devel/doxyconfig000066400000000000000000002342241472525725500157130ustar00rootroot00000000000000# Doxyfile 1.8.3.1

# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a hash (#) is considered a comment and will be ignored.
# The format is:
#       TAG = value [value, ...]
# For lists, items can also be appended using:
#       TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (" ").

#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------

# This tag specifies the encoding used for all characters in the config file
# that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# http://www.gnu.org/software/libiconv for the list of possible encodings.

DOXYFILE_ENCODING = UTF-8

# The PROJECT_NAME tag is a single word (or sequence of words) that should
# identify the project. Note that if you do not use Doxywizard you need
# to put quotes around the project name if it contains spaces.

PROJECT_NAME = "Berkeley Advanced Reconstruction Toolbox (BART)"

# The PROJECT_NUMBER tag can be used to enter a project or revision number.
# This could be handy for archiving the generated documentation or
# if some version control system is used.

#PROJECT_NUMBER = 0.3

# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give the viewer
# a quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF = ""

# With the PROJECT_LOGO tag one can specify a logo or icon that is
# included in the documentation. The maximum height of the logo should not
# exceed 55 pixels and the maximum width should not exceed 200 pixels.
# Doxygen will copy the logo to the output directory.

PROJECT_LOGO =

# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
# If a relative path is entered, it will be relative to the location
# where doxygen was started. If left blank the current directory will be used.

OUTPUT_DIRECTORY = doc/dx/

# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
# 4096 sub-directories (in 2 levels) under the output directory of each output
# format and will distribute the generated files over these directories.
# Enabling this option can be useful when feeding doxygen a huge amount of
# source files, where putting all generated files in the same directory would
# otherwise cause performance problems for the file system.

CREATE_SUBDIRS = NO

# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
# The default language is English, other supported languages are:
# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.

OUTPUT_LANGUAGE = English

# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
# include brief member descriptions after the members that are listed in
# the file and class documentation (similar to JavaDoc).
# Set to NO to disable this.

BRIEF_MEMBER_DESC = YES

# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
# the brief description of a member or function before the detailed description.
# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
# brief descriptions will be completely suppressed.

REPEAT_BRIEF = YES

# This tag implements a quasi-intelligent brief description abbreviator
# that is used to form the text in various listings. Each string
# in this list, if found as the leading text of the brief description, will be
# stripped from the text and the result after processing the whole list, is
# used as the annotated text. Otherwise, the brief description is used as-is.
# If left blank, the following values are used ("$name" is automatically
# replaced with the name of the entity): "The $name class" "The $name widget"
# "The $name file" "is" "provides" "specifies" "contains"
# "represents" "a" "an" "the"

ABBREVIATE_BRIEF =

# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# Doxygen will generate a detailed section even if there is only a brief
# description.

ALWAYS_DETAILED_SEC = NO

# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
# members were ordinary class members. Constructors, destructors and assignment
# operators of the base classes will not be shown.
INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. Note that you specify absolute paths here, but also # relative paths, which will be relative from the directory where doxygen is # started. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. 
ALIASES =

# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding
# "class=itcl::class" will allow you to use the command class in the
# itcl::class meaning.

TCL_SUBST =

# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
# sources only. Doxygen will then generate output that is more tailored for C.
# For instance, some of the names that are used will be different. The list
# of all members will be omitted, etc.

OPTIMIZE_OUTPUT_FOR_C = YES

# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
# sources only. Doxygen will then generate output that is more tailored for
# Java. For instance, namespaces will be presented as packages, qualified
# scopes will look different, etc.

OPTIMIZE_OUTPUT_JAVA = NO

# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
# sources only. Doxygen will then generate output that is more tailored for
# Fortran.

OPTIMIZE_FOR_FORTRAN = NO

# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
# sources. Doxygen will then generate output that is tailored for
# VHDL.

OPTIMIZE_OUTPUT_VHDL = NO

# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension,
# and language is one of the parsers supported by doxygen: IDL, Java,
# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C,
# C++. For instance to make doxygen treat .inc files as Fortran files (default
# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note
# that for custom extensions you also need to set FILE_PATTERNS otherwise the
# files are not read by doxygen.

EXTENSION_MAPPING =

# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all
# comments according to the Markdown format, which allows for more readable
# documentation. See http://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you
# can mix doxygen, HTML, and XML commands with Markdown formatting.
# Disable only in case of backward compatibility issues.

MARKDOWN_SUPPORT = YES

# When enabled doxygen tries to link words that correspond to documented classes,
# or namespaces to their corresponding documentation. Such a link can be
# prevented in individual cases by putting a % sign in front of the word or
# globally by setting AUTOLINK_SUPPORT to NO.

AUTOLINK_SUPPORT = YES

# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
# to include (a tag file for) the STL sources as input, then you should
# set this tag to YES in order to let doxygen match function declarations and
# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
# func(std::string) {}). This also makes the inheritance and collaboration
# diagrams that involve STL classes more complete and accurate.

BUILTIN_STL_SUPPORT = NO

# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.

CPP_CLI_SUPPORT = NO

# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
# Doxygen will parse them like normal C++ but will assume all classes use public
# instead of private inheritance when no explicit protection keyword is present.
SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES (the # default) will make doxygen replace the get and set methods by a property in # the documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. SYMBOL_CACHE_SIZE = 0 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. 
This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal # scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. 
If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. 
This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if section-label ... \endif # and \cond section-label ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. 
Do not use # file names with spaces, bibtex cannot handle them. CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = *.c *.cc *.cxx *.cpp *.c++ *.cu *.h *.py *.m # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. 
Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = src/spirit-1.0 src/spirit-2.0 src/butterfly # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. 
A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = # If the USE_MD_FILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page (index.html). # This can be useful if you have a project on for instance GitHub and want reuse # the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. 
# The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If left blank doxygen will # generate a default style sheet. Note that it is recommended to use # HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this # tag will in the future become obsolete. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify an additional # user-defined cascading style sheet that is included after the standard # style sheets created by doxygen. Using this option one can overrule # certain style aspects. This is preferred over using HTML_STYLESHEET # since it does not replace the standard style sheet and is therefore more # robust against future updates. Doxygen will copy the style sheet file to # the output directory. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. 
# For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely # identify the documentation publisher. This should be a reverse domain-name # style string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. 
DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. 
To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want to formulas look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # thA MathJax output. 
Supported types are HTML-CSS, NativeMML (i.e. MathML) and # SVG. The default value is HTML-CSS, which is slower, but has the best # compatibility. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. # However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow; in that case enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a web server instead of a web client using Javascript. # There are two flavours of web server based search depending on the # EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for # searching and an index file used by the script. When EXTERNAL_SEARCH is # enabled the indexing and searching needs to be provided by external tools. # See the manual for details. SERVER_BASED_SEARCH = NO # When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP # script for searching. Instead the search results are written to an XML file # which needs to be processed by an external indexer. Doxygen will invoke an # external search engine pointed to by the SEARCHENGINE_URL option to obtain # the search results. Doxygen ships with an example indexer (doxyindexer) and # search engine (doxysearch.cgi) which are based on the open source search engine # library Xapian. See the manual for configuration details. EXTERNAL_SEARCH = NO # The SEARCHENGINE_URL should point to a search engine hosted by a web server # which will return the search results when EXTERNAL_SEARCH is enabled. # Doxygen ships with an example search engine (doxysearch) which is based on # the open source search engine library Xapian. See the manual for configuration # details. SEARCHENGINE_URL = # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed # search data is written to a file for indexing by an external tool. With the # SEARCHDATA_FILE tag the name of this file can be specified. SEARCHDATA_FILE = searchdata.xml # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the # EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is # useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple # projects and redirect the results back to the right project.
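# (Editorial illustration, not part of the stock Doxygen template: for two # projects indexed into one shared external search index, hypothetical values # could be EXTERNAL_SEARCH_ID = bart for this project together with a mapping # such as EXTRA_SEARCH_MAPPINGS = other=../other/html in each configuration.)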
EXTERNAL_SEARCH_ID = # The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen # projects other than the one defined by this configuration file, but that are # all added to the same external search index. Each project needs to have a # unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id # to a relative location where the documentation can be found. # The format is: EXTRA_SEARCH_MAPPINGS = id1=loc1 id2=loc2 ... EXTRA_SEARCH_MAPPINGS = #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = YES # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4 will be used. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML.
LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output. # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements; missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES Doxygen will # generate man pages. GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO.
MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. 
If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. 
EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default). HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. When you want a different font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class reference variables) of the class with other documented classes.
COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # manageable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. 
INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES, the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lie further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES libbart-devel/genctags000077500000000000000000000001251472525725500153270ustar00rootroot00000000000000#!/bin/bash ctags --langmap=c++:+.cu --extra=+f `find . -regex '.*\.[ch]u*' -print` libbart-devel/git-version.sh000077500000000000000000000006131472525725500164150ustar00rootroot00000000000000#!/bin/bash if test -d ${GIT_DIR:-.git} -o -f .git then GDOUT=`git describe --abbrev=7 --match "v*" --dirty 2>&1` if [[ $?
-eq 0 ]]; then echo ${GDOUT} git describe --abbrev=7 --match "v*" | cut -f1 -d'-' > version.txt else if git diff --quiet --exit-code then cat version.txt else var=`cat version.txt` echo ${var}-dirty fi fi else cat version.txt fi libbart-devel/lib/000077500000000000000000000000001472525725500143565ustar00rootroot00000000000000libbart-devel/lib/.gitignore000066400000000000000000000001071472525725500163440ustar00rootroot00000000000000# Ignore everything in this directory * # Except this file !.gitignore libbart-devel/makedoc.sh000077500000000000000000000001271472525725500155520ustar00rootroot00000000000000#!/bin/bash ( cat doxyconfig ; echo "PROJECT_NUMBER=$(cat version.txt)" ) | doxygen - libbart-devel/matlab/000077500000000000000000000000001472525725500150505ustar00rootroot00000000000000libbart-devel/matlab/bart.m000066400000000000000000000075701472525725500161660ustar00rootroot00000000000000function [varargout] = bart(cmd, varargin) % BART Call BART command from Matlab. % [varargout] = BART(cmd, varargin) to run given bart command (cmd) using the % data arrays/matrices passed as varargin. % % [A, B] = BART('command', X, Y) call command with inputs X Y and outputs A B % % To output a list of available bart commands simply run "bart". To % output the help for a specific bart command type "bart command -h". % % Parameters: % cmd: Command to run as string (including non data parameters) % varargin: Data arrays/matrices used as input % % Example: % bart traj -h % [reco] = bart('nufft -i traj', data) call nufft with inputs data and outputs reco % % Authors: % 2014-2016 Martin Uecker % 2018 (Edited for WSL) Soumick Chatterjee % 2020 Martin Krämer % 2022 Jon Tamir % Check input variables if nargin==0 || isempty(cmd) fprintf('Usage: bart <command> <arguments...>\n\n'); cmd = ''; end % Check bart toolbox path [bart_path, isWSL] = get_bart_path(); if isempty(bart_path) error('BART path not detected.'); end % Clear the LD_LIBRARY_PATH environment variable (to work around a bug in Matlab). % Store original library path to be restored later. if ismac==1 libPath = getenv('DYLD_LIBRARY_PATH'); setenv('DYLD_LIBRARY_PATH', ''); else libPath = getenv('LD_LIBRARY_PATH'); setenv('LD_LIBRARY_PATH', ''); end % Strip string arguments that were passed as varargin strArgsInd = cellfun(@ischar,varargin); strArgs = varargin(strArgsInd); dataArgs = varargin(~strArgsInd); if (~isempty(strArgs)) % append to cmd cmd = sprintf('%s %s', cmd, sprintf('%s ', strArgs{:})); cmd(end) = []; end % Root path for temporary file name = tempname; % Files used for input in = cell(1, length(dataArgs)); for iFile = 1:length(dataArgs) in{iFile} = strcat(name, 'in', num2str(iFile)); writecfl(in{iFile}, dataArgs{iFile}); end in_str = sprintf(' %s', in{:}); % Files used for output out = cell(1, nargout); for iFile = 1:nargout out{iFile} = strcat(name, 'out', num2str(iFile)); end out_str = sprintf(' %s', out{:}); % Run bart if ispc % running windows? if isWSL % For WSL and modify paths in_strWSL = wslPathCorrection(in_str); out_strWSL = wslPathCorrection(out_str); final_strWSL = ['wsl ', bart_path, '/bart ', cmd, ' ', in_strWSL, ' ', out_strWSL]; ERR = system(final_strWSL); else % For cygwin use bash and modify paths ERR = system(['bash.exe --login -c ', ... strrep(bart_path, filesep, '/'), ... '"', '/bart ', strrep(cmd, filesep, '/'), ' ', ... strrep(in_str, filesep, '/'), 
' ', strrep(out_str, filesep, '/'), '"']); end else ERR = system([bart_path, '/bart ', cmd, ' ', in_str, ' ', out_str]); end % Remove input files for iFile = 1:length(in) if (exist(strcat(in{iFile}, '.cfl'),'file')) delete(strcat(in{iFile}, '.cfl')); end if (exist(strcat(in{iFile}, '.hdr'),'file')) delete(strcat(in{iFile}, '.hdr')); end end % Remove output files for iFile = 1:length(out) if ERR == 0 varargout{iFile} = readcfl(out{iFile}); end if (exist(strcat(out{iFile}, '.cfl'),'file')) delete(strcat(out{iFile}, '.cfl')); end if (exist(strcat(out{iFile}, '.hdr'),'file')) delete(strcat(out{iFile}, '.hdr')); end end % Restore Library Path to its original value if (~isempty(libPath)) if ismac==1 setenv('DYLD_LIBRARY_PATH', libPath); else setenv('LD_LIBRARY_PATH', libPath); end end % Check if running BART was successful if (ERR~=0) && (~isempty(cmd)) error('command exited with an error'); end end libbart-devel/matlab/get_bart_path.m000066400000000000000000000015661472525725500200410ustar00rootroot00000000000000function [bart_path, isWSL] = get_bart_path() % BART get BART for Matlab. % [bart_path, isWSL] = get_bart_path() will return the bart path as seen by Matlab, % and optionally will return whether WSL was detected. % % Authors: % 2022 Jon Tamir % Check bart toolbox path bart_path = getenv('BART_TOOLBOX_PATH'); isWSL = false; if isempty(bart_path) if ~isempty(getenv('TOOLBOX_PATH')) bart_path = getenv('TOOLBOX_PATH'); elseif exist('/usr/local/bin/bart', 'file') bart_path = '/usr/local/bin'; elseif exist('/usr/bin/bart', 'file') bart_path = '/usr/bin'; else % Try to execute bart inside wsl, if it works, then it returns status 0 [bartstatus, ~] = system('wsl bart version -V'); if bartstatus==0 [~, bart_path] = system('wsl dirname $(which bart)'); bart_path = strip(bart_path); isWSL = true; end end end end libbart-devel/matlab/readcfl.m000066400000000000000000000022341472525725500166270ustar00rootroot00000000000000function data = readcfl(filenameBase) %READCFL Read complex data from file. % READCFL(filenameBase) read in reconstruction data stored in filenameBase.cfl % (complex float) based on dimensions stored in filenameBase.hdr. % % Parameters: % filenameBase: path and filename of cfl file (without extension) % % Written to edit data with the Berkeley Advanced Reconstruction Toolbox (BART). % % Copyright 2016. CBClab, Maastricht University. % 2016 Tim Loderhose (t.loderhose@student.maastrichtuniversity.nl) dims = readReconHeader(filenameBase); filename = strcat(filenameBase,'.cfl'); fid = fopen(filename); data_r_i = fread(fid, prod([2 dims]), '*float32'); data_r_i = reshape(data_r_i, [2 dims]); data = complex(zeros(dims,'single'),0); data(:) = complex(data_r_i(1,:),data_r_i(2,:)); fclose(fid); end function dims = readReconHeader(filenameBase) filename = strcat(filenameBase,'.hdr'); fid = fopen(filename); line = getNextLine(fid); dims = str2num(line); fclose(fid); end function line = getNextLine(fid) line = fgetl(fid); while(line(1) == '#') line = fgetl(fid); end end libbart-devel/matlab/test_bart.m000066400000000000000000000072631472525725500172250ustar00rootroot00000000000000function test_bart() %TEST_BART Runs a unit test for the MATLAB BART wrapper. % TEST_BART() can be used to test if the BART toolbox is properly set-up % and if changes/additions made to the MATLAB wrapper break any core % functionality of the MATLAB wrapper. % % Copyright 2020. 
Martin Krämer (Martin.Kraemer@uni-jena.de) % 2022 Jon Tamir %% Test setup testLog = []; capture = false; tolFloat = 1e-7; %% Test1: Environmental variable bartPath = get_bart_path() testAssert(~isempty(bartPath), 'BART path'); %% Test2: Write/Read cfl file = tempname; data = rand(32,24,16); testRun('writecfl(file, data)','Write cfl file'); dataRead = testRun('readcfl(file)','Read cfl file', 1); testAssert(~any(reshape(abs(data-dataRead{1}),[],1) > tolFloat), 'Data consistency cfl file'); if (exist(strcat(file,'.cfl'),'file')) delete(strcat(file,'.cfl')) end %% Test3: Run bart with various parameters testRun('bart', 'Wrapper (without parameter)'); testRun('bart traj -h', 'Wrapper (method help)'); phantom = testRun("bart('phantom')", "Wrapper (No input, no parameter)", 1); testAssert(~isempty(phantom{1}), "Wrapper (No input, no parameter) - check output"); phantom = testRun("bart('phantom -3')", "Wrapper (No input)", 1); testAssert(~isempty(phantom{1}), "Wrapper (No input) - check output)"); phantom_kSpace = testRun("bart('fft -u 3', varargin{1})", "Wrapper (One input, one parameter)", 1, phantom{1}); testAssert(~isempty(phantom_kSpace{1}), "Wrapper (One input, one parameter) - check output)"); %% Check final test score failCount = sum(cellfun(@(x)(~x),testLog(:,2))); if (failCount == 0) fprintf('\nTEST Result: All Tests Passed!\n\n'); else fprintf(2, '\nTEST Result: %i Tests Failed!\n\n', failCount); end %% Helper functions function [Result] = testRun(Command, Name, OutCount, varargin) if (nargin < 3) OutCount = []; Result = []; end fprintf('TEST [%s] - running "%s" ', Name, Command); status = false; try % when not printing to console (capture = true) we use evalc, % otherwise eval is used if (capture) fprintf('\n'); if (isempty(OutCount)) eval(Command); else % to actually capture and return the output we have % pre initialize the results cell array with the % pre-defined number of outputs to capture Result = cell(OutCount); [Result{:}] = eval(Command); end else if (isempty(OutCount)) evalc(Command); else Result = cell(OutCount); [~, Result{:}] = evalc(Command); end end status = true; catch end testLog = cat(1, testLog, {Name, status}); fprintf(2 - status, '>> %s\n', testStatusToString(status)); end function testAssert(Condition, Name) fprintf('TEST [%s] ', Name); testLog = cat(1, testLog, {Name, Condition}); fprintf(2 - Condition, '>> %s\n', testStatusToString(Condition)); end function [StatusString] = testStatusToString(Status) if (Status) StatusString = 'Passed'; else StatusString = 'Failed'; end end function printLog(log) for iLog = 1:size(log,1) fprintf('%s: %s\n', log{iLog, 1}, log{iLog, 2}); end end end libbart-devel/matlab/writecfl.m000066400000000000000000000023771472525725500170560ustar00rootroot00000000000000function writecfl(filenameBase,data) %WRITECFL Write complex data to file. % WRITECFL(filenameBase, data) writes reconstruction data to % filenameBase.cfl (complex float) and its dimensions to filenameBase.hdr. % % Written to edit data with the Berkeley Advanced Reconstruction Toolbox (BART). % % Parameters: % filenameBase: path and filename of cfl file (without extension) % data: array/matrix to be written % % Copyright 2013. Joseph Y Cheng. % Copyright 2016. CBClab, Maastricht University. % 2012 Joseph Y Cheng (jycheng@mrsrl.stanford.edu). % 2016 Tim Loderhose (t.loderhose@student.maastrichtuniversity.nl). 
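% % Example (editorial illustration, not part of the original help text; the % file name below is hypothetical): round-trip an array through writecfl and % readcfl, both defined in this directory, and check that the data survive up % to single precision: % data = rand(32,24,16) + 1i*rand(32,24,16); % writecfl('/tmp/example', data); % creates /tmp/example.cfl and .hdr % check = readcfl('/tmp/example'); % assert(max(abs(data(:) - check(:))) < 1e-6);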
dims = size(data); writeReconHeader(filenameBase,dims); filename = strcat(filenameBase,'.cfl'); fid = fopen(filename,'w'); data = data(:); fwrite(fid,[real(data)'; imag(data)'],'float32'); fclose(fid); end function writeReconHeader(filenameBase,dims) filename = strcat(filenameBase,'.hdr'); fid = fopen(filename,'w'); fprintf(fid,'# Dimensions\n'); for N=1:length(dims) fprintf(fid,'%d ',dims(N)); end if length(dims) < 5 for N=1:(5-length(dims)) fprintf(fid,'1 '); end end fprintf(fid,'\n'); fclose(fid); end libbart-devel/matlab/wslPathCorrection.m000066400000000000000000000007161472525725500207040ustar00rootroot00000000000000%Soumick Chatterjee function [outData] = wslPathCorrection(inData) outData=inData; for i = 'a':'z' %Replace drive letters with /mnt/ outData=strrep(outData,[i,':'],['/mnt/',i]); %if drive letter is supplied in lowercase outData=strrep(outData,[upper(i),':'],['/mnt/',i]); %if drive letter is supplied as uppercase end outData = strrep(outData, '\', '/'); %Change windows filesep to linux filesep end libbart-devel/msys_setup.sh000066400000000000000000000007731472525725500163660ustar00rootroot00000000000000#!/bin/bash pacman --sync --noconfirm --needed base-devel mingw-w64-x86_64-toolchain git mingw-w64-x86_64-fftw mingw-w64-x86_64-openblas mingw-w64-x86_64-libpng echo "Installing /usr/lib/librt.a" CURRENT_PATH=$(pwd) cd / curl https://repo.msys2.org/msys/x86_64/msys2-runtime-devel-3.2.0-3-x86_64.pkg.tar.zst | tar -I zstd -x usr/lib/librt.a cd $CURRENT_PATH GCC_PATH="/mingw64/bin" if [ -d "$GCC_PATH" ] && [[ ":$PATH:" != *":$GCC_PATH:"* ]]; then echo "export PATH=$GCC_PATH:\$PATH" >> ~/.bashrc fi libbart-devel/pkg/000077500000000000000000000000001472525725500143715ustar00rootroot00000000000000libbart-devel/pkg/python/000077500000000000000000000000001472525725500157125ustar00rootroot00000000000000libbart-devel/pkg/python/LICENSE000077700000000000000000000000001472525725500203452../../LICENSEustar00rootroot00000000000000libbart-devel/pkg/python/README.md000077700000000000000000000000001472525725500210712../../README.mdustar00rootroot00000000000000libbart-devel/pkg/python/bart/000077500000000000000000000000001472525725500166425ustar00rootroot00000000000000libbart-devel/pkg/python/bart/__init__.py000066400000000000000000000001011472525725500207430ustar00rootroot00000000000000from .bart import bart, bart2 from .cfl import readcfl, writecfl libbart-devel/pkg/python/bart/bart.py000077700000000000000000000000001472525725500236232../../../python/bart.pyustar00rootroot00000000000000libbart-devel/pkg/python/bart/cfl.py000077700000000000000000000000001472525725500232532../../../python/cfl.pyustar00rootroot00000000000000libbart-devel/pkg/python/bart/wslsupport.py000077700000000000000000000000001472525725500264472../../../python/wslsupport.pyustar00rootroot00000000000000libbart-devel/pkg/python/pyproject.toml000066400000000000000000000006221472525725500206260ustar00rootroot00000000000000[build-system] requires = ["setuptools"] build-backend = "setuptools.build_meta" [project] name = "bart" description = "Python wrapper for the Berkeley Advanced Reconstruction Toolbox (BART). Requires existing BART installation." 
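# Editorial note (illustrative, not part of the original file): with this # package and a working BART binary installed, a typical call from Python # would be, e.g., `from bart import bart; x = bart(1, 'phantom -x 64')`.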
dependencies = ["numpy"] dynamic = ["version"] [project.urls] Homepage = "https://mrirecon.github.io/bart/" [tool.setuptools.dynamic] version = {file = ["version.txt"]} libbart-devel/pkg/python/version.txt000077700000000000000000000000001472525725500230072../../version.txtustar00rootroot00000000000000libbart-devel/pkg/rpm/000077500000000000000000000000001472525725500151675ustar00rootroot00000000000000libbart-devel/pkg/rpm/bart.spec000066400000000000000000000045131472525725500167760ustar00rootroot00000000000000Name: bart Version: {{{ bart_version }}} Release: {{{ bart_release }}}%{?dist} Epoch: 1 Summary: tools for computational magnetic resonance imaging License: BSD URL: https://mrirecon.github.io/bart VCS: {{{ git_repo_vcs }}} Source0: {{{ git_archive path=. source_name=bart dir_name=bart }}} %if 0%{?rhel} == 07 BuildRequires: fftw-devel, lapack-devel, openblas-devel, atlas-devel, libpng-devel, devtoolset-7-toolchain, devtoolset-7-libatomic-devel, chrpath %else BuildRequires: gcc, make, fftw-devel, lapack-devel, openblas-devel, atlas-devel, libpng-devel, chrpath %endif Requires: fftw, lapack, openblas, atlas, libpng %description The Berkeley Advanced Reconstruction Toolbox (BART) is a free and open-source image-reconstruction framework for Computational Magnetic Resonance Imaging. It consists of a programming library and a toolbox of command-line programs. The library provides common operations on multi-dimensional arrays, Fourier and wavelet transforms, as well as generic implementations of iterative optimization algorithms. The command-line tools provide direct access to basic operations on multi-dimensional arrays as well as efficient implementations of many calibration and reconstruction algorithms for parallel imaging and compressed sensing. # I suppose the binary shouldn't contain debug symbols by default %global debug_package %{nil} %prep {{{ git_setup_macro dir_name=bart }}} # transfer .git-version information from rpkg-macro-expansion time to build time echo {{{ bart_git_version }}} > version.txt %build %if 0%{?rhel} == 07 . 
/opt/rh/devtoolset-7/enable %endif export LDFLAGS="$LDFLAGS -Wl,--no-as-needed" make PARALLEL=1 make doc/commands.txt # strip rpath from bart exe, as Fedora dislikes it chrpath -d bart %install rm -rf $RPM_BUILD_ROOT export make PREFIX=usr DESTDIR=%{buildroot} install mkdir -p %{buildroot}/usr/share/bash-completion/completions/ install scripts/bart_completion.sh %{buildroot}/usr/share/bash-completion/completions/ install -D doc/bart.1 %{buildroot}/%{_mandir}/man1/bart.1 %files /usr/bin/bart %license LICENSE %{_mandir}/man1/bart.1* %doc /usr/share/doc/bart/README /usr/share/doc/bart/commands.txt {{{ bart_static_docs }}} /usr/share/bash-completion/completions/bart_completion.sh %changelog {{{ git_dir_changelog }}} libbart-devel/pkg/rpm/libbart-dev.install000066400000000000000000000022031472525725500207470ustar00rootroot00000000000000lib/libmisc.a usr/lib/bart src/misc/debug.h usr/include/bart/misc/ src/misc/mmio.h usr/include/bart/misc/ src/misc/version.h usr/include/bart/misc/ src/misc/cppwrap.h usr/include/bart/misc/ src/misc/misc.h usr/include/bart/misc/ src/misc/types.h usr/include/bart/misc/ src/misc/png.h usr/include/bart/misc/ src/misc/opts.h usr/include/bart/misc/ src/misc/nested.h usr/include/bart/misc/ lib/libnum.a usr/lib/bart src/num/multind.h usr/include/bart/num/ src/num/flpmath.h usr/include/bart/num/ src/num/fft.h usr/include/bart/num/ src/num/init.h usr/include/bart/num/ src/num/iovec.h usr/include/bart/num/ src/num/ops.h usr/include/bart/num/ lib/liblinops.a usr/lib/bart src/linops/linop.h usr/include/bart/linops/ src/linops/someops.h usr/include/bart/linops/ src/linops/grad.h usr/include/bart/linops/ lib/libiter.a usr/lib/bart src/iter/iter.h usr/include/bart/iter/ src/iter/iter2.h usr/include/bart/iter/ src/iter/prox.h usr/include/bart/iter/ src/iter/thresh.h usr/include/bart/iter/ src/iter/lsqr.h usr/include/bart/iter/ lib/libwavelet.a usr/lib/bart src/wavelet/wavelet.h usr/include/bart/wavelet/ src/wavelet/wavthresh.h usr/include/bart/wavelet/ libbart-devel/pkg/rpm/libbart-devel.spec000066400000000000000000000024331472525725500205610ustar00rootroot00000000000000Name: libbart-devel Version: {{{ bart_version }}} Release: {{{ bart_release }}}%{?dist} Epoch: 1 Summary: Development files for BART License: BSD URL: https://mrirecon.github.io/bart VCS: {{{ git_dir_vcs }}} Source0: {{{ git_archive path=. source_name=libbart-devel dir_name=libbart-devel }}} BuildRequires: gcc, make, fftw-devel, lapack-devel, openblas-devel, atlas-devel, libpng-devel %description The Berkeley Advanced Reconstruction Toolbox (BART) is a free and open-source image-reconstruction framework for Computational Magnetic Resonance Imaging. This package provides headers and static libraries. 
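# Editorial illustration (not part of the original spec), assuming the header # and library paths installed by this package: a user program could be # compiled against the static libraries with, e.g., # cc -I/usr/include/bart myprog.c -L/usr/lib/bart -lnum -lmisc -lm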
# I suppose the binary shouldn't contain debug symbols by default %global debug_package %{nil} %prep {{{ git_setup_macro dir_name=libbart-devel }}} %build export LDFLAGS="$LDFLAGS -Wl,--no-as-needed" make PARALLEL=1 %install rm -rf $RPM_BUILD_ROOT while read line; do src=$(cut -d' ' -f1 <<<"$line") dst=%{buildroot}/$(cut -d' ' -f2 <<<"$line") install -d "$dst" install "$src" "$dst" done < pkg/rpm/libbart-dev.install # ^ Contents of https://salsa.debian.org/med-team/bart/-/blob/master/debian/libbart-dev.install %files /usr/include/bart/ /usr/lib/bart/ %license LICENSE %changelog {{{ git_dir_changelog }}} libbart-devel/pkg/rpm/octave-bart.spec000066400000000000000000000031011472525725500202450ustar00rootroot00000000000000%global octpkg bart Name: octave-%{octpkg} Version: {{{ bart_version }}} Release: {{{ bart_release }}}%{?dist} Epoch: 1 Summary: Octave bindings for BART License: BSD URL: https://mrirecon.github.io/bart VCS: {{{ git_dir_vcs }}} Source0: {{{ git_archive path=. source_name=octave-bart dir_name=octave-bart }}} BuildArch: noarch BuildRequires: octave-devel Requires: bart, octave Requires(post): octave Requires(postun): octave %description The Berkeley Advanced Reconstruction Toolbox (BART) is a free and open-source image-reconstruction framework for Computational Magnetic Resonance Imaging. This package provides Octave bindings for BART. %prep {{{ git_setup_macro dir_name=octave-bart }}} # files that belong inside an octave pkg according to https://octave.org/doc/v4.4.0/Creating-Packages.html mkdir matlab/inst mv matlab/*.m matlab/inst cp LICENSE matlab/COPYING cat > matlab/DESCRIPTION < /dev/null 2>&1 if [[ $? -ne 0 ]]; then release=$(($(./git-version.sh | cut -d'-' -f2) + 1)) date=$(date '+%Y%m%d') commit=$(git rev-parse --short=7 HEAD) release=$release"."$date"git"$commit else release=1 fi echo -n $release } function bart_git_version() { ./git-version.sh | tr -d '\n' } function bart_static_docs() { find doc/ -type f -name "*.txt" | sed "s/^doc\/\(.*\)\$/\/usr\/share\/doc\/bart\/\1/g" } libbart-devel/python/000077500000000000000000000000001472525725500151315ustar00rootroot00000000000000libbart-devel/python/bart.py000066400000000000000000000220731472525725500164370ustar00rootroot00000000000000# Copyright 2016. The Regents of the University of California. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # 2016 Siddharth Iyer # 2018 Soumick Chatterjee , WSL Support import subprocess as sp import tempfile as tmp import os import sys if __spec__.parent: from . 
import cfl from .wslsupport import PathCorrection else: # 'traditional' copy-paste bart.py from wslsupport import PathCorrection import cfl isWASM = True if sys.platform == 'emscripten' else False def bart(nargout, cmd, *args, **kwargs): if isWASM: print("Please use await bart.bart2!", file=sys.stderr) raise RuntimeError("Synchronous bart not available in wasm") prep = bart_prepare(nargout, cmd, *args, **kwargs) if not prep: return ERR, stdout, stderr = execute_cmd(prep['shell_cmd']) # store error code, stdout and stderr in function attributes for outside access # this makes it possible to access these variables from outside the function (e.g. "print(bart.ERR)") bart.ERR, bart.stdout, bart.stderr = ERR, stdout, stderr return bart_postprocess(nargout, ERR, prep['infiles'], prep['infiles_kw'], prep['outfiles']) async def bart2(nargout, cmd, *args, **kwargs): if not isWASM: print("Please use synchronous bart.bart!", file=sys.stderr) raise RuntimeError("Asynchronous bart is only available in wasm") prep = bart_prepare(nargout, cmd, *args, **kwargs) if not prep: return ERR, stdout, stderr = await run_wasm_cmd(**prep) # store error code, stdout and stderr in function attributes for outside access # this makes it possible to access these variables from outside the function (e.g. "print(bart.ERR)") bart.ERR, bart.stdout, bart.stderr = ERR, stdout, stderr return bart_postprocess(nargout, ERR, prep['infiles'], prep['infiles_kw'], prep['outfiles']) def bart_prepare(nargout, cmd, *args, **kwargs): if type(nargout) != int or nargout < 0: print("Usage: bart(<nargout>, <command>, <arguments...>)") return try: bart_path = os.environ['BART_TOOLBOX_PATH'] except: bart_path = None # support old environment variable: if bart_path is None: try: bart_path = os.environ['TOOLBOX_PATH'] except: bart_path = None isWSL = False if not isWASM and not bart_path: if os.path.isfile('/usr/local/bin/bart'): bart_path = '/usr/local/bin' elif os.path.isfile('/usr/bin/bart'): bart_path = '/usr/bin' else: bartstatus = os.system('wsl bart version -V') if bartstatus==0: bart_path = '/usr/bin' isWSL = True else: raise Exception('Environment variable BART_TOOLBOX_PATH is not set.') name = tmp.NamedTemporaryFile().name nargin = len(args) infiles = [name + 'in' + str(idx) for idx in range(nargin)] for idx in range(nargin): cfl.writecfl(infiles[idx], args[idx]) args_kw = [("--" if len(kw)>1 else "-") + kw for kw in kwargs] infiles_kw = [name + 'in' + kw for kw in kwargs] for idx, kw in enumerate(kwargs): cfl.writecfl(infiles_kw[idx], kwargs[kw]) outfiles = [name + 'out' + str(idx) for idx in range(nargout)] cmd = cmd.split(" ") if os.name =='nt': if isWSL: #For WSL and modify paths infiles = [PathCorrection(item) for item in infiles] infiles_kw = [PathCorrection(item) for item in infiles_kw] outfiles = [PathCorrection(item) for item in outfiles] cmd = [PathCorrection(item) for item in cmd] args_infiles_kw = [item for pair in zip(args_kw, infiles_kw) for item in pair] shell_cmd = ['wsl', 'bart', *cmd, *args_infiles_kw, *infiles, *outfiles] else: #For cygwin use bash and modify paths infiles = [item.replace(os.path.sep, '/') for item in infiles] infiles_kw = [item.replace(os.path.sep, '/') for item in infiles_kw] outfiles = [item.replace(os.path.sep, '/') for item in outfiles] cmd = [item.replace(os.path.sep, '/') for item in cmd] args_infiles_kw = [item for pair in zip(args_kw, infiles_kw) for item in pair] shell_cmd = ['bash.exe', '--login', '-c', os.path.join(bart_path, 'bart'), *cmd, *args_infiles_kw, *infiles, *outfiles] #TODO: Test with cygwin, this is just 
translation from matlab code assert(not isWASM) else: args_infiles_kw = [item for pair in zip(args_kw, infiles_kw) for item in pair] shell_cmd = [os.path.join(bart_path, 'bart') if not isWASM else 'bart', *cmd, *args_infiles_kw, *infiles, *outfiles] return dict(shell_cmd=shell_cmd, infiles=infiles, infiles_kw=infiles_kw, outfiles=outfiles) def bart_postprocess(nargout, ERR, infiles, infiles_kw, outfiles): for elm in infiles: if os.path.isfile(elm + '.cfl'): os.remove(elm + '.cfl') if os.path.isfile(elm + '.hdr'): os.remove(elm + '.hdr') for elm in infiles_kw: if os.path.isfile(elm + '.cfl'): os.remove(elm + '.cfl') if os.path.isfile(elm + '.hdr'): os.remove(elm + '.hdr') output = [] for idx in range(nargout): elm = outfiles[idx] if not ERR: output.append(cfl.readcfl(elm)) if os.path.isfile(elm + '.cfl'): os.remove(elm + '.cfl') if os.path.isfile(elm + '.hdr'): os.remove(elm + '.hdr') if ERR: print(f"Command exited with error code {ERR}.") return if nargout == 0: return elif nargout == 1: return output[0] else: return output def execute_cmd(cmd): """ Execute a command in a shell. Print and catch the output. """ errcode = 0 stdout = "" stderr = "" # remove empty strings from cmd cmd = [item for item in cmd if len(item)] # execute cmd proc = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True) # print to stdout for stdout_line in iter(proc.stdout.readline, ""): stdout += stdout_line print(stdout_line, end="") proc.stdout.close() # in case of error, print to stderr errcode = proc.wait() if errcode: stderr = "".join(proc.stderr.readlines()) print(stderr) proc.stderr.close() return errcode, stdout, stderr wasm_bart_ok = False; async def get_wasm_cfl(name): await wasm_async_call(f"get_cfl('{name}')") async def put_wasm_cfl(name): await wasm_async_call(f"send_cfl('{name}')") async def rm_bart_cfl(name): await wasm_async_call(f"rm_cfl('{name}')") async def wasm_load_bart(): global wasm_bart_ok await wasm_async_call("reload_bart()") wasm_bart_ok = True async def run_wasm_cmd(shell_cmd, infiles, infiles_kw, outfiles): global wasm_bart_ok; try: if not wasm_bart_ok: await wasm_load_bart() for f in infiles + infiles_kw: await put_wasm_cfl(f) non_empty_cmd = [x for x in shell_cmd if len(shell_cmd) > 0] result = await wasm_async_call("bart_cmd('" + ' '.join(non_empty_cmd) + "')") ERR, stdout, stderr = result['ret'], result['stdout'], result['stderr'] if not stdout is None and len(stdout.strip()) > 0: print(stdout) if not stderr is None and len(stderr.strip()) > 0: print(stderr, file=sys.stderr) if not 0 == ERR: print(f"Function exited with {ERR}", file=sys.stderr) for f in outfiles: await get_wasm_cfl(f) for f in infiles + infiles_kw + outfiles: await rm_bart_cfl(f) return ERR, stdout, stderr except Exception as e: print("Exception in bart worker calls occurred:") print(e) wasm_bart_ok = False raise e async def wasm_async_call(cmd): # synchronous function would be nice # but this seems impossible: https://github.com/pyodide/pyodide/issues/3932 #loop = asyncio.get_event_loop() #task = pyodide.code.run_js(cmd) #return loop.run_until_complete(asyncio.wait([task])) ret = (await pyodide.code.run_js(cmd)).to_py() if 0 != ret[0]: raise Exception(f"Error in JS call: {ret[1]}") return ret[1]; if isWASM: import pyodide, pyodide_js, js # Export pyodide to webworker namespace: pyodide.code.run_js("var pyodide;") js.pyodide = pyodide_js # load BART: pyodide.code.run_js(""" if ('undefined' == (typeof window)) { importScripts("bart_base.js"); } else { script = document.createElement('script'); 
script.type = 'text/javascript'; script.async = true; script.src = "bart_base.js"; document.body.appendChild(script); } """) libbart-devel/python/bart_tf.py000066400000000000000000000241111472525725500171230ustar00rootroot00000000000000# Copyright 2022. Uecker Lab. University Center Göttingen. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # Moritz Blumenthal import os import numpy as np import cfl import tensorflow as tf2 try: import tensorflow.compat.v1 as tf1 except ImportError: import tensorflow as tf1 pass def tf2_export_module(model, dims, path, trace_complex=True): class BartWrapper(tf2.Module): def __init__(self, model, dims, vars_as_input = True, name=None): super(BartWrapper, self).__init__(name=name) self.model = model self.trace_complex = trace_complex self.dims_bart = [1] * 16 self.dims_tf = [1] * (len(dims) + 1) if not(trace_complex): self.dims_bart = self.dims_bart + [2] self.dims_tf = self.dims_tf + [2] for i in range(len(dims)): self.dims_bart[len(self.dims_bart) - 2 - i] = dims[i] self.dims_tf[len(self.dims_tf) - 2 - i] = dims[i] self.model(np.zeros(self.dims_tf, np.float32)) #run model ones to initialize weights else : for i in range(len(dims)): self.dims_bart[len(self.dims_bart) - 2 - i] = dims[i] self.dims_tf[len(self.dims_tf) - 2 - i] = dims[i] self.model(np.zeros(self.dims_tf, np.complex64)) #run model ones to initialize weights self.dims_tf[0] = -1 self.dims_bart[0] = -1 self.trace_complex = trace_complex if vars_as_input: self.vars = model.variables else: self.vars = [] self.vars_rtoc = [] # variables for which a 0 imaginary part is stacked for var in self.vars: self.vars_rtoc.append(2 != var.shape[-1]) self.sig = {} self.add_concrete_function() @tf2.function def __call__(self, input, weights, grad_in): for i in range(len(weights)): wgh = weights[i] if (self.vars_rtoc)[i]: slc = [ slice(None, None, None) ]* len(wgh.shape) slc[-1] = 0 wgh=wgh[tuple(slc)] self.model.variables[i].assign(wgh) with tf2.GradientTape(persistent=True) as g: g.watch(input) print("Tracing TensorFlow model with dims: {}".format(input)) res = tf2.reshape(input, self.dims_tf) outr = self.model(res) out = tf2.reshape(outr, self.dims_bart) result = {} result["output_0"] = out result["grad_0_0"] = g.gradient(out, input, grad_in) for i, input in enumerate(self.model.variables, 1): result["grad_{}_0".format(i)] = g.gradient(out, input, grad_in) if self.vars_rtoc[i - 1]: tmp = result["grad_{}_0".format(i)] result["grad_{}_0".format(i)] = tf2.stack([tmp, tf2.zeros_like(tmp)], axis = len(tmp.shape)) return result def add_concrete_function(self, name=None): dims = self.dims_bart.copy() dims[0] = None if (self.trace_complex): signature_input = tf2.TensorSpec(shape=dims, dtype=tf2.complex64, name="input_0") signature_grad_ys = tf2.TensorSpec(shape=dims, dtype=tf2.complex64, name="grad_ys_0") else: signature_input = tf2.TensorSpec(shape=dims, dtype=tf2.float32, name="input_0") signature_grad_ys = tf2.TensorSpec(shape=dims, dtype=tf2.float32, name="grad_ys_0") signature_weight = [] for i, var in enumerate(self.model.variables, 1): if self.vars_rtoc[i - 1]: signature_weight.append(tf2.TensorSpec(shape=list(var.shape)+[2], dtype=tf2.float32, name="input_{}".format(i))) else: signature_weight.append(tf2.TensorSpec(shape=var.shape, dtype=tf2.float32, name="input_{}".format(i))) if name is None: name = "serving_default" self.sig[name] = self.__call__.get_concrete_function(signature_input, signature_weight, 
signature_grad_ys) def save_variables(self, path): weights = [] for i, var in enumerate(self.variables): if (self.vars_rtoc[i]): weights.append(var.numpy().astype(np.complex64)) else: weights.append(np.empty(var.shape[:-1], dtype=np.complex64)) slc = [ slice(None, None, None) ] * len(var.shape) slc[-1] = 0 weights[-1].real = var.numpy()[tuple(slc)] slc[-1] = 1 weights[-1].imag = var.numpy()[tuple(slc)] if 0 == len(weights[-1].shape): weights[-1] = weights[-1].reshape([1]) weights[-1] = np.transpose(weights[-1]) if (0 < len(weights)): cfl.writemulticfl(path, weights) def save(self, path): tf2.saved_model.save(self, path, signatures=self.sig) self.save_variables(path+"/bart_initial_weights") from tensorflow.python.tools import saved_model_utils meta_graph_def = saved_model_utils.get_meta_graph_def(path, "serve") with open(path + "/bart_config.dat", 'w') as f: for signature in list(self.sig): inputs = meta_graph_def.signature_def[signature].inputs outputs = meta_graph_def.signature_def[signature].outputs f.write('# ArgumentNameMapping\n') f.write('{}\n'.format(signature)) for bart_name in list(inputs): f.write("{} {} {}\n".format(bart_name, inputs[bart_name].name.split(":")[0], inputs[bart_name].name.split(":")[1])) for bart_name in list(outputs): f.write("{} {} {}\n".format(bart_name, outputs[bart_name].name.split(":")[0], outputs[bart_name].name.split(":")[1])) BartWrapper(model, dims).save(path) class TensorMap: def __init__(self, tensor, name, enforce_real = False): if isinstance(tensor, TensorMap): self.tensor = tensor.tensor else: self.tensor = tensor self.name = name if (self.tensor.shape[-1] != 2) and (self.tensor.dtype == tf1.float32): self.type = "REAL" else: self.type = "COMPLEX" if isinstance(tensor, TensorMap): self.type = tensor.type if enforce_real: self.type = "REAL" def export(self): n = self.tensor.name return "{} {} {} {}".format(self.name, n.split(":")[0], n.split(":")[1], self.type) def tf1_export_tensor_mapping(path, name, mapping, signature="serving_default"): with open(path + "/" + name + ".map", 'w') as f: f.write('# ArgumentNameMapping\n') f.write('{}\n'.format(signature)) for map in mapping: f.write('{}\n'.format(map.export())) def tf1_op_exists(graph, name): try: graph.get_operation_by_name(name) return True except KeyError: return False def tf1_find_tensors(graph, inputs, outputs): if inputs is None: II = 0 inputs = [] while tf1_op_exists(graph, "input_"+str(II)): inputs.append(graph.get_tensor_by_name("input_{}:0".format(II))) II += 1 if outputs is None: OO = 0 outputs = [] while tf1_op_exists(graph, "output_"+str(OO)): outputs.append(graph.get_tensor_by_name("output_{}:0".format(OO))) OO += 1 for i in range(len(inputs)): inputs[i] = TensorMap(inputs[i], "input_"+str(i)) for i in range(len(outputs)): outputs[i] = TensorMap(outputs[i], "output_"+str(i)) return inputs, outputs def tf1_graph_attach_gradients(graph, inputs, outputs): grad_tensors=[] for o, out in enumerate(outputs): with graph.as_default(): gy = tf1.placeholder(out.tensor.dtype, shape=out.tensor.shape, name='grad_ys_'+ str(o)) grad_tensors.append(TensorMap(gy, 'grad_ys_'+ str(o), out.type == "REAL")) for i, inp in enumerate(inputs): for o, out in enumerate(outputs): name = 'grad_{}_{}'.format(i, o) with graph.as_default(): grad = tf1.gradients(out.tensor, inp.tensor, grad_tensors[o].tensor) grad = tf1.reshape(grad, tf1.shape(inp.tensor), name='grad_{}_{}'.format(i, o)) grad_tensors.append(TensorMap(grad, name, inp.type == "REAL")) return grad_tensors def tf1_export_graph(path, graph = None, 
session=None, inputs=None, outputs=None, name=None, attach_gradients=True): if graph is None: graph = tf1.get_default_graph() if name is None: name = os.path.basename(os.path.normpath(path)) inputs, outputs = tf1_find_tensors(graph, inputs, outputs) mappings = [] if attach_gradients: mappings = tf1_graph_attach_gradients(graph, inputs, outputs) mappings += inputs mappings += outputs tf1.train.write_graph(graph, path, name+'.pb', False) if session is not None: saver = tf1.train.Saver() saver.save(session, os.path.join(path, name)) else: if (tf1_op_exists(graph, "save/restore_all")): print("WARNING: No weights are stored with the graph!\nWARNING: BART probably will not be able to load the graph.") tf1_export_tensor_mapping(path, name, mappings) def tf1_convert_model(model_path, path, name): sess = tf1.Session() saver = tf1.train.Saver() saver.restore(sess, model_path) # gradients are attached inside tf1_export_graph (attach_gradients defaults to True) tf1_export_graph(path, graph=sess.graph, session=sess, name=name) libbart-devel/python/bartview.py000077500000000000000000000325061472525725500173370ustar00rootroot00000000000000#!/usr/bin/python # # Copyright 2015. The Regents of the University of California. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # 2015 Frank Ong from __future__ import division import operator import numpy as np import sys import matplotlib import matplotlib.pyplot as plt from matplotlib.widgets import Slider, Button, RadioButtons from functools import partial import time import threading import os.path class DiscreteSlider(Slider): """A matplotlib slider widget with discrete steps.""" def __init__(self, *args, **kwargs): self.previous_val = kwargs['valinit'] Slider.__init__(self, *args, **kwargs) def set_val(self, val): discrete_val = round(val) xy = self.poly.xy xy[2] = discrete_val, 1 xy[3] = discrete_val, 0 self.poly.xy = xy self.valtext.set_text(self.valfmt % discrete_val) if self.drawon: self.ax.figure.canvas.draw() self.val = val if self.previous_val!=discrete_val: self.previous_val = discrete_val if not self.eventson: return for cid, func in self.observers.iteritems(): func(discrete_val) class BartView(object): def __init__(self, cflname): matplotlib.rcParams['toolbar'] = 'None' #matplotlib.rcParams['font.size'] = 6 # Read data self.cflname = sys.argv[1] self.im = self.readcfl(self.cflname) self.im_unsqueeze_shape = np.where( np.array(self.im.shape) > 1 )[0] self.im = self.im.squeeze() t1 = time.clock() # Reorder image self.Ndims = len( self.im.shape ) self.order = np.r_[:self.Ndims] self.im_ordered = self.im self.order3 = np.array([0,1,1]) # Slice image self.slice_num = np.zeros( self.Ndims, dtype = 'int' ); self.im_shape = self.im_ordered.shape self.im_slice = self.im_ordered[ (slice(None), slice(None)) + tuple(self.slice_num[2:]) ] # Create figure self.fig = plt.figure(facecolor='black', figsize=(9,6)) #self.fig = plt.figure(facecolor='black', figsize=(6,4)) self.fig.subplots_adjust( left=0.0 , bottom=0.0 , right=1.0 , top=1 - 0.25) self.fig.canvas.set_window_title(self.cflname) # Show image self.immax = np.max(abs(self.im)) self.l = plt.imshow( abs(self.im_slice) , cmap = "gray", vmin=0, vmax=self.immax) self.ax = plt.gca() self.asp = self.im_ordered.shape[1] / self.im_ordered.shape[0] self.aspect = 1 self.ax.set_aspect( 1 ) plt.axis('off') radios = [] buttons = [] sliders = [] # Create Radio Buttons for X Y dimensions dims = self.im_unsqueeze_shape[ self.order ].astype(str) for i in xrange(0,len(dims)): dims[i] = "Dim " +
dims[i] oboxx_ax = plt.axes( [0, 1 - 0.03, 0.1, 0.03], axisbg = "gainsboro" ) oboxx_ax.set_xticks([]); oboxx_ax.set_yticks([]); orderx_ax = plt.axes( [0, 1 - 0.18, 0.1, 0.15], axisbg = 'gainsboro' ) orderx_radio = RadioButtons( orderx_ax, dims, activecolor = 'SteelBlue', active = 0 ) orderx_ax.text(0.5,1.05, 'Up/Down', horizontalalignment = 'center') radios.append( orderx_radio ) orderx_radio.on_clicked( self.update_orderx ) oboxy_ax = plt.axes( [0.1, 1 - 0.03, 0.1, 0.03], axisbg = "gainsboro" ) oboxy_ax.set_xticks([]); oboxy_ax.set_yticks([]); ordery_ax = plt.axes( [0.1, 1 - 0.18, 0.1, 0.15], axisbg = 'gainsboro' ) ordery_radio = RadioButtons( ordery_ax, dims, activecolor = 'SteelBlue', active = 1 ) ordery_ax.text(0.5,1.05, 'Left/Right', horizontalalignment = 'center') radios.append( ordery_radio ) ordery_radio.on_clicked( self.update_ordery ) # Create Radio buttons for mosaic self.mosaic_valid = False mbox_ax = plt.axes( [0.2, 1 - 0.03, 0.1, 0.03], axisbg = "gainsboro" ) mbox_ax.set_xticks([]); mbox_ax.set_yticks([]); mosaic_ax = plt.axes( [0.2, 1 - 0.18, 0.1, 0.15], axisbg = 'gainsboro' ) mosaic_radio = RadioButtons( mosaic_ax, dims, activecolor = 'SteelBlue', active = 1 ) mosaic_ax.text(0.5,1.05, 'Mosaic', horizontalalignment = 'center') radios.append( mosaic_radio ) mosaic_radio.on_clicked( self.update_mosaic ) # Create flip buttons self.flipx = 1; flipx_ax = plt.axes( [0.3, 1 - 0.09, 0.1, 0.09] ) flipx_button = Button( flipx_ax, 'Flip\nUp/Down', color='gainsboro' ) flipx_button.on_clicked(self.update_flipx); self.flipy = 1; flipy_ax = plt.axes( [0.3, 1 - 0.18, 0.1, 0.09] ) flipy_button = Button( flipy_ax, 'Flip\nLeft/Right', color='gainsboro' ) flipy_button.on_clicked(self.update_flipy); # Create Refresh buttons refresh_ax = plt.axes( [0.4, 1 - 0.09, 0.1, 0.09] ) refresh_button = Button( refresh_ax, 'Refresh', color='gainsboro' ) refresh_button.on_clicked(self.update_refresh); # Create Save button save_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] ) save_button = Button( save_ax, 'Export to\nPNG', color='gainsboro' ) save_button.on_clicked(self.save); # Create dynamic refresh radio button #self.drefresh = threading.Event() #drefresh_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] ) #drefresh_button = Button( drefresh_ax, 'Dynamic\nRefresh', color='gainsboro' ) #drefresh_button.on_clicked(self.update_drefresh); # Create Magnitude/phase radio button self.magnitude = True mag_ax = plt.axes( [0.5, 1 - 0.18, 0.1, 0.18], axisbg = 'gainsboro' ) mag_radio = RadioButtons( mag_ax, ('Mag','Phase') , activecolor = 'SteelBlue', active = 0 ) radios.append( mag_radio ) mag_radio.on_clicked( self.update_magnitude ) sbox_ax = plt.axes( [0.6, 1 - 0.18, 0.5, 0.18], axisbg='gainsboro') sbox_ax.set_xticks([]) sbox_ax.set_yticks([]) # Create aspect sliders aspect_ax = plt.axes( [0.65, 1 - 0.09 + 0.02, 0.1, 0.02], axisbg = 'white' ) aspect_slider = Slider( aspect_ax, "", 0.25, 4, valinit=1, color='SteelBlue') aspect_ax.text( 4 / 2,1.5, 'Aspect Ratio', horizontalalignment = 'center') sliders.append( aspect_slider ) aspect_slider.on_changed( self.update_aspect ) # Create contrast sliders self.vmin = 0 vmin_ax = plt.axes( [0.83, 1 - 0.09 + 0.02, 0.1, 0.02], axisbg = 'white' ) vmin_slider = Slider( vmin_ax, "", 0, 1, valinit=0, color='SteelBlue') vmin_ax.text(0.5,1.5, 'Contrast Min', horizontalalignment = 'center') sliders.append( vmin_slider ) vmin_slider.on_changed( self.update_vmin ) self.vmax = 1 vmax_ax = plt.axes( [0.83, 1 - 0.18 + 0.02, 0.1, 0.02], axisbg = 'white' ) vmax_slider = Slider( vmax_ax, "", 0, 1, 
valinit=1, color='SteelBlue') vmax_ax.text(0.5,1.5, 'Contrast Max', horizontalalignment = 'center') sliders.append( vmax_slider ) vmax_slider.on_changed( self.update_vmax ) # Create sliders for choosing slices box_ax = plt.axes( [0, 1 - 0.25, 1, 0.07], axisbg='gainsboro') box_ax.set_xticks([]) box_ax.set_yticks([]) slider_thick = 0.02 slider_start = 0.1 ax = [] for d in np.r_[:self.Ndims]: slice_ax = plt.axes( [0.01 + 1 / self.Ndims * d, 1 - 0.24, 0.8 / self.Ndims, slider_thick] , axisbg='white') slice_slider = DiscreteSlider( slice_ax, "", 0, self.im_shape[d]-1, valinit=self.slice_num[d],valfmt='%i', color='SteelBlue') slice_ax.text( (self.im_shape[d]-1)/2,1.5, 'Dim %d Slice' % self.im_unsqueeze_shape[d], horizontalalignment = 'center' ) sliders.append(slice_slider); slice_slider.on_changed( partial( self.update_slice, d ) ) plt.show() def readcfl(self, name): h = open(name + ".hdr", "r") h.readline() # skip l = h.readline() dims = [int(i) for i in l.split( )] n = reduce(operator.mul, dims, 1) h.close() return np.memmap( name + ".cfl", dtype = np.complex64, mode='r', shape=tuple(dims), order='F' ) def save( self, event ): extent = self.ax.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted()) num = 0 fname = self.cflname + '_' + str(num) + '.png' while( os.path.isfile(fname) ): num += 1 fname = self.cflname + '_' + str(num) + '.png' self.fig.savefig(fname, bbox_inches=extent) def update_flipx( self, event ): self.flipx *= -1 self.update_image() def update_flipy( self, event ): self.flipy *= -1 self.update_image() def update_refresh( self, event ): self.update_image() def dynamic_refresh( self ): while( self.drefresh.is_set() ): self.update_image() def update_drefresh( self, event ): if ( self.drefresh.is_set() ): self.drefresh.clear() else: self.drefresh.set() th = threading.Thread( target = self.dynamic_refresh ) th.start() def update_aspect( self, aspect ): self.aspect = aspect self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * aspect ) def update_vmax( self, vmax ): self.vmax = max(self.vmin, vmax) self.l.set_clim( vmax = self.vmax * self.immax ); def update_vmin( self, vmin ): self.vmin = min(self.vmax,vmin) self.l.set_clim( vmin = self.vmin * self.immax ); def update_magnitude( self, l ): self.magnitude = ( l == 'Mag' ) if (self.magnitude): self.l.set_cmap('gray') else: self.l.set_cmap('hsv') self.update_image() def update_orderx( self, l ): l = int(l[4:]) self.order3[0] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() def update_ordery( self, l ): l = int(l[4:]) self.order3[1] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() def update_ordered_image(self): self.mosaic_valid = len( self.order3[:3] ) == len( set( self.order3[:3] ) ) self.order_valid = len( self.order3[:2] ) == len( set( self.order3[:2] ) ); if ( self.mosaic_valid ): self.order[:3] = self.order3[:3] order_remain = np.r_[:self.Ndims] for t in np.r_[:3]: order_remain = order_remain[ (order_remain != self.order[t] ) ] self.order[3:] = order_remain self.im_ordered = np.transpose( self.im, self.order ) self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect ) self.update_image() elif ( self.order_valid ): self.order[:2] = self.order3[:2] order_remain = np.r_[:self.Ndims] for t in np.r_[:2]: order_remain = order_remain[ (order_remain != self.order[t] ) ] self.order[2:] = order_remain self.im_ordered = np.transpose( self.im, self.order ) self.ax.set_aspect( self.asp * 
self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect ) self.update_image() def update_image( self ): self.immax = np.max(abs(self.im)) self.l.set_clim( vmin = self.vmin * self.immax , vmax = self.vmax * self.immax ); if ( self.mosaic_valid ): im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy), slice(None)) + tuple(self.slice_num[self.order[3:]])] im_slice = self.mosaic( im_slice ) else: im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy)) + tuple(self.slice_num[self.order[2:]]) ] if self.magnitude: self.l.set_data( abs(im_slice) ) else: self.l.set_data( (np.angle(im_slice) + np.pi) / (2 * np.pi) ) self.fig.canvas.draw() def update_slice( self, d, s ): self.slice_num[d] = int(round(s)) self.update_image() def mosaic( self, im ): im = im.squeeze() (x, y, z) = im.shape z2 = int( np.ceil( z ** 0.5 ) ) z = int( z2 ** 2 ) im = np.pad( im, [(0,0), (0,0), (0, z - im.shape[2] )], mode='constant') im = im.reshape( (x, y * z, 1), order = 'F' ) im = im.transpose( (1, 2, 0) ) im = im.reshape( (y * z2 , z2, x), order = 'F' ) im = im.transpose( (2, 1, 0) ) im = im.reshape( (x * z2, y * z2), order = 'F' ) return im def update_mosaic( self, l ): l = int(l[4:]) self.order3[2] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() if __name__ == "__main__": # Error if more than 1 argument if (len(sys.argv) != 2): print "BartView: multidimensional image viewer for cfl" print "Usage: bview cflname" exit() BartView( sys.argv[1] ) libbart-devel/python/bartview3.py000077500000000000000000000327671472525725500174330ustar00rootroot00000000000000#!/usr/bin/python3 # # Copyright 2017. Massachusetts Institute of Technology. # Copyright 2015. The Regents of the University of California. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. 
# # Authors: # 2015 Frank Ong # 2017 Siddharth Iyer import operator import numpy as np import sys import matplotlib import matplotlib.pyplot as plt from matplotlib.widgets import Slider, Button, RadioButtons from functools import partial, reduce import time import threading import os.path class DiscreteSlider(Slider): """A matplotlib slider widget with discrete steps.""" def __init__(self, *args, **kwargs): self.previous_val = kwargs['valinit'] Slider.__init__(self, *args, **kwargs) def set_val(self, val): discrete_val = round(val) xy = self.poly.xy xy[2] = discrete_val, 1 xy[3] = discrete_val, 0 self.poly.xy = xy self.valtext.set_text(self.valfmt % discrete_val) if self.drawon: self.ax.figure.canvas.draw() self.val = val if self.previous_val!=discrete_val: self.previous_val = discrete_val if not self.eventson: return for cid, func in self.observers.items(): func(discrete_val) class BartView(object): def __init__(self, cflname): matplotlib.rcParams['toolbar'] = 'None' #matplotlib.rcParams['font.size'] = 6 # Read data self.cflname = sys.argv[1] self.im = self.readcfl(self.cflname) self.im_unsqueeze_shape = np.where( np.array(self.im.shape) > 1 )[0] self.im = self.im.squeeze() if sys.version_info.major==3 and sys.version_info.minor < 8: t1 = time.clock() # Reorder image self.Ndims = len( self.im.shape ) self.order = np.r_[:self.Ndims] self.im_ordered = self.im self.order3 = np.array([0,1,1]) # Slice image self.slice_num = np.zeros( self.Ndims, dtype = 'int' ); self.im_shape = self.im_ordered.shape self.im_slice = self.im_ordered[ (slice(None), slice(None)) + tuple(self.slice_num[2:]) ] # Create figure self.fig = plt.figure(facecolor='black', figsize=(9,6)) #self.fig = plt.figure(facecolor='black', figsize=(6,4)) self.fig.subplots_adjust( left=0.0 , bottom=0.0 , right=1.0 , top=1 - 0.25) self.fig.canvas.set_window_title(self.cflname) # Show image self.immax = np.max(abs(self.im)) self.l = plt.imshow( abs(self.im_slice) , cmap = "gray", vmin=0, vmax=self.immax) self.ax = plt.gca() self.asp = self.im_ordered.shape[1] / self.im_ordered.shape[0] self.aspect = 1 self.ax.set_aspect( 1 ) plt.axis('off') radios = [] buttons = [] sliders = [] # Create Radio Buttons for X Y dimensions dims = self.im_unsqueeze_shape[ self.order ].astype(str) for i in range(0,len(dims)): dims[i] = "Dim " + dims[i] oboxx_ax = plt.axes( [0, 1 - 0.03, 0.1, 0.03], facecolor = "gainsboro" ) oboxx_ax.set_xticks([]); oboxx_ax.set_yticks([]); orderx_ax = plt.axes( [0, 1 - 0.18, 0.1, 0.15], facecolor = 'gainsboro' ) orderx_radio = RadioButtons( orderx_ax, dims, activecolor = 'SteelBlue', active = 0 ) orderx_ax.text(0.5,1.05, 'Up/Down', horizontalalignment = 'center') radios.append( orderx_radio ) orderx_radio.on_clicked( self.update_orderx ) oboxy_ax = plt.axes( [0.1, 1 - 0.03, 0.1, 0.03], facecolor = "gainsboro" ) oboxy_ax.set_xticks([]); oboxy_ax.set_yticks([]); ordery_ax = plt.axes( [0.1, 1 - 0.18, 0.1, 0.15], facecolor = 'gainsboro' ) ordery_radio = RadioButtons( ordery_ax, dims, activecolor = 'SteelBlue', active = 1 ) ordery_ax.text(0.5,1.05, 'Left/Right', horizontalalignment = 'center') radios.append( ordery_radio ) ordery_radio.on_clicked( self.update_ordery ) # Create Radio buttons for mosaic self.mosaic_valid = False mbox_ax = plt.axes( [0.2, 1 - 0.03, 0.1, 0.03], facecolor = "gainsboro" ) mbox_ax.set_xticks([]); mbox_ax.set_yticks([]); mosaic_ax = plt.axes( [0.2, 1 - 0.18, 0.1, 0.15], facecolor = 'gainsboro' ) mosaic_radio = RadioButtons( mosaic_ax, dims, activecolor = 'SteelBlue', active = 1 ) mosaic_ax.text(0.5,1.05, 
'Mosaic', horizontalalignment = 'center') radios.append( mosaic_radio ) mosaic_radio.on_clicked( self.update_mosaic ) # Create flip buttons self.flipx = 1; flipx_ax = plt.axes( [0.3, 1 - 0.09, 0.1, 0.09] ) flipx_button = Button( flipx_ax, 'Flip\nUp/Down', color='gainsboro' ) flipx_button.on_clicked(self.update_flipx); self.flipy = 1; flipy_ax = plt.axes( [0.3, 1 - 0.18, 0.1, 0.09] ) flipy_button = Button( flipy_ax, 'Flip\nLeft/Right', color='gainsboro' ) flipy_button.on_clicked(self.update_flipy); # Create Refresh buttons refresh_ax = plt.axes( [0.4, 1 - 0.09, 0.1, 0.09] ) refresh_button = Button( refresh_ax, 'Refresh', color='gainsboro' ) refresh_button.on_clicked(self.update_refresh); # Create Save button save_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] ) save_button = Button( save_ax, 'Export to\nPNG', color='gainsboro' ) save_button.on_clicked(self.save); # Create dynamic refresh radio button #self.drefresh = threading.Event() #drefresh_ax = plt.axes( [0.4, 1 - 0.18, 0.1, 0.09] ) #drefresh_button = Button( drefresh_ax, 'Dynamic\nRefresh', color='gainsboro' ) #drefresh_button.on_clicked(self.update_drefresh); # Create Magnitude/phase radio button self.magnitude = True mag_ax = plt.axes( [0.5, 1 - 0.18, 0.1, 0.18], facecolor = 'gainsboro' ) mag_radio = RadioButtons( mag_ax, ('Mag','Phase') , activecolor = 'SteelBlue', active = 0 ) radios.append( mag_radio ) mag_radio.on_clicked( self.update_magnitude ) sbox_ax = plt.axes( [0.6, 1 - 0.18, 0.5, 0.18], facecolor='gainsboro') sbox_ax.set_xticks([]) sbox_ax.set_yticks([]) # Create aspect sliders aspect_ax = plt.axes( [0.65, 1 - 0.09 + 0.02, 0.1, 0.02], facecolor = 'white' ) aspect_slider = Slider( aspect_ax, "", 0.25, 4, valinit=1, color='SteelBlue') aspect_ax.text( 4 / 2,1.5, 'Aspect Ratio', horizontalalignment = 'center') sliders.append( aspect_slider ) aspect_slider.on_changed( self.update_aspect ) # Create contrast sliders self.vmin = 0 vmin_ax = plt.axes( [0.83, 1 - 0.09 + 0.02, 0.1, 0.02], facecolor = 'white' ) vmin_slider = Slider( vmin_ax, "", 0, 1, valinit=0, color='SteelBlue') vmin_ax.text(0.5,1.5, 'Contrast Min', horizontalalignment = 'center') sliders.append( vmin_slider ) vmin_slider.on_changed( self.update_vmin ) self.vmax = 1 vmax_ax = plt.axes( [0.83, 1 - 0.18 + 0.02, 0.1, 0.02], facecolor = 'white' ) vmax_slider = Slider( vmax_ax, "", 0, 1, valinit=1, color='SteelBlue') vmax_ax.text(0.5,1.5, 'Contrast Max', horizontalalignment = 'center') sliders.append( vmax_slider ) vmax_slider.on_changed( self.update_vmax ) # Create sliders for choosing slices box_ax = plt.axes( [0, 1 - 0.25, 1, 0.07], facecolor='gainsboro') box_ax.set_xticks([]) box_ax.set_yticks([]) slider_thick = 0.02 slider_start = 0.1 ax = [] for d in np.r_[:self.Ndims]: slice_ax = plt.axes( [0.01 + 1 / self.Ndims * d, 1 - 0.24, 0.8 / self.Ndims, slider_thick] , facecolor='white') slice_slider = DiscreteSlider( slice_ax, "", 0, self.im_shape[d]-1, valinit=self.slice_num[d],valfmt='%i', color='SteelBlue') slice_ax.text( (self.im_shape[d]-1)/2,1.5, 'Dim %d Slice' % self.im_unsqueeze_shape[d], horizontalalignment = 'center' ) sliders.append(slice_slider); slice_slider.on_changed( partial( self.update_slice, d ) ) plt.show() def readcfl(self, name): h = open(name + ".hdr", "r") h.readline() # skip l = h.readline() dims = [int(i) for i in l.split( )] n = reduce(operator.mul, dims, 1) h.close() return np.memmap( name + ".cfl", dtype = np.complex64, mode='r', shape=tuple(dims), order='F' ) def save( self, event ): extent = 
self.ax.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted()) num = 0 fname = self.cflname + '_' + str(num) + '.png' while( os.path.isfile(fname) ): num += 1 fname = self.cflname + '_' + str(num) + '.png' self.fig.savefig(fname, bbox_inches=extent) def update_flipx( self, event ): self.flipx *= -1 self.update_image() def update_flipy( self, event ): self.flipy *= -1 self.update_image() def update_refresh( self, event ): self.update_image() def dynamic_refresh( self ): while( self.drefresh.is_set() ): self.update_image() def update_drefresh( self, event ): if ( self.drefresh.is_set() ): self.drefresh.clear() else: self.drefresh.set() th = threading.Thread( target = self.dynamic_refresh ) th.start() def update_aspect( self, aspect ): self.aspect = aspect self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * aspect ) def update_vmax( self, vmax ): self.vmax = max(self.vmin, vmax) self.l.set_clim( vmax = self.vmax * self.immax ); def update_vmin( self, vmin ): self.vmin = min(self.vmax,vmin) self.l.set_clim( vmin = self.vmin * self.immax ); def update_magnitude( self, l ): self.magnitude = ( l == 'Mag' ) if (self.magnitude): self.l.set_cmap('gray') else: self.l.set_cmap('hsv') self.update_image() def update_orderx( self, l ): l = int(l[4:]) self.order3[0] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() def update_ordery( self, l ): l = int(l[4:]) self.order3[1] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() def update_ordered_image(self): self.mosaic_valid = len( self.order3[:3] ) == len( set( self.order3[:3] ) ) self.order_valid = len( self.order3[:2] ) == len( set( self.order3[:2] ) ); if ( self.mosaic_valid ): self.order[:3] = self.order3[:3] order_remain = np.r_[:self.Ndims] for t in np.r_[:3]: order_remain = order_remain[ (order_remain != self.order[t] ) ] self.order[3:] = order_remain self.im_ordered = np.transpose( self.im, self.order ) self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect ) self.update_image() elif ( self.order_valid ): self.order[:2] = self.order3[:2] order_remain = np.r_[:self.Ndims] for t in np.r_[:2]: order_remain = order_remain[ (order_remain != self.order[t] ) ] self.order[2:] = order_remain self.im_ordered = np.transpose( self.im, self.order ) self.ax.set_aspect( self.asp * self.im_ordered.shape[0] / self.im_ordered.shape[1] * self.aspect ) self.update_image() def update_image( self ): self.immax = np.max(abs(self.im)) self.l.set_clim( vmin = self.vmin * self.immax , vmax = self.vmax * self.immax ); if ( self.mosaic_valid ): im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy), slice(None)) + tuple(self.slice_num[self.order[3:]])] im_slice = self.mosaic( im_slice ) else: im_slice = self.im_ordered[ (slice(None,None,self.flipx), slice(None,None,self.flipy)) + tuple(self.slice_num[self.order[2:]]) ] if self.magnitude: self.l.set_data( abs(im_slice) ) else: self.l.set_data( (np.angle(im_slice) + np.pi) / (2 * np.pi) ) self.fig.canvas.draw() def update_slice( self, d, s ): self.slice_num[d] = int(round(s)) self.update_image() def mosaic( self, im ): im = im.squeeze() (x, y, z) = im.shape z2 = int( np.ceil( z ** 0.5 ) ) z = int( z2 ** 2 ) im = np.pad( im, [(0,0), (0,0), (0, z - im.shape[2] )], mode='constant') im = im.reshape( (x, y * z, 1), order = 'F' ) im = im.transpose( (1, 2, 0) ) im = im.reshape( (y * z2 , z2, x), order = 'F' ) im = im.transpose( (2, 1, 0) ) im = im.reshape( (x * z2, y * 
z2), order = 'F' ) return im def update_mosaic( self, l ): l = int(l[4:]) self.order3[2] = np.where( self.im_unsqueeze_shape == l )[0] self.update_ordered_image() if __name__ == "__main__": # Error if more than 1 argument if (len(sys.argv) != 2): print("BartView: multidimensional image viewer for cfl") print("Usage: bview cflname") exit() BartView( sys.argv[1] ) libbart-devel/python/cfl.py000066400000000000000000000124511472525725500162520ustar00rootroot00000000000000# Copyright 2013-2015. The Regents of the University of California. # Copyright 2021. Uecker Lab. University Center Göttingen. # Copyright 2024. Institute for Biomedical Imaging. TU Graz. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # 2013 Martin Uecker # 2015 Jonathan Tamir from __future__ import print_function from __future__ import with_statement import numpy as np import mmap import os # see src/misc/io.c for rawarray header definition _RA_MAGIC = int(0x7961727261776172) _RA_TYPE_COMPLEX = int(4) _RA_CFL_SIZE = int(8) _RA_HEADER_ELEMS = 6 def _readra(name): with open(name, "rb") as f: header = np.fromfile(f, dtype=np.uint64, count=_RA_HEADER_ELEMS) magic = header[0] flags = header[1] eltype = header[2] elsize = header[3] datasize = header[4] ndims = header[5] if ( magic != _RA_MAGIC or (flags & np.uint64(1)) != 0 or eltype != _RA_TYPE_COMPLEX or elsize != _RA_CFL_SIZE ): print("Invalid .ra header!") raise RuntimeError shape_arr = np.fromfile(f, dtype=np.uint64, count = ndims) arr = np.fromfile(f, dtype=np.complex64, count = datasize // elsize).reshape(shape_arr, order='F') return arr def _writera(name, array): header = np.empty((6,), dtype=np.uint64) header[0] = _RA_MAGIC header[1] = np.uint64(0) header[2] = _RA_TYPE_COMPLEX header[3] = _RA_CFL_SIZE header[4] = np.prod(array.shape) * np.dtype(np.complex64).itemsize header[5] = array.ndim shape_arr = np.array(array.shape, dtype=np.uint64) fullsize = int(header[4] + header.nbytes + shape_arr.nbytes) with open(name, "a+b") as d: os.ftruncate(d.fileno(), fullsize) mm = mmap.mmap(d.fileno(), fullsize, flags=mmap.MAP_SHARED, prot=mmap.PROT_WRITE) if array.dtype != np.complex64: array = array.astype(np.complex64) mm.write(np.ascontiguousarray(header)) mm.write(np.ascontiguousarray(shape_arr)) mm.write(np.ascontiguousarray(array.T)) mm.close() return def readcfl(name): if name.endswith(".ra"): return _readra(name) # get dims from .hdr with open(name + ".hdr", "rt") as h: h.readline() # skip l = h.readline() dims = [int(i) for i in l.split()] # remove singleton dimensions from the end n = np.prod(dims) dims_prod = np.cumprod(dims) dims = dims[:np.searchsorted(dims_prod, n)+1] # load data and reshape into dims with open(name + ".cfl", "rb") as d: a = np.fromfile(d, dtype=np.complex64, count=n); return a.reshape(dims, order='F') # column-major def readmulticfl(name): # get dims from .hdr with open(name + ".hdr", "rt") as h: lines = h.read().splitlines() index_dim = 1 + lines.index('# Dimensions') total_size = int(lines[index_dim]) index_sizes = 1 + lines.index('# SizesDimensions') sizes = [int(i) for i in lines[index_sizes].split()] index_dims = 1 + lines.index('# MultiDimensions') with open(name + ".cfl", "rb") as d: a = np.fromfile(d, dtype=np.complex64, count=total_size) offset = 0 result = [] for i in range(len(sizes)): dims = ([int(i) for i in lines[index_dims + i].split()]) n = np.prod(dims) result.append(a[offset:offset+n].reshape(dims, order='F')) offset += n if total_size != offset: 
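# bookkeeping check: the per-array dimensions listed in the header must account for exactly the number of elements read from the .cfl file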
print("Error") return result def writecfl(name, array): if name.endswith(".ra"): return _writera(name, array) with open(name + ".hdr", "wt") as h: h.write('# Dimensions\n') for i in (array.shape): h.write("%d " % i) h.write('\n') size = np.prod(array.shape) * np.dtype(np.complex64).itemsize with open(name + ".cfl", "a+b") as d: os.ftruncate(d.fileno(), size) mm = mmap.mmap(d.fileno(), size, flags=mmap.MAP_SHARED, prot=mmap.PROT_WRITE) if array.dtype != np.complex64: array = array.astype(np.complex64) mm.write(np.ascontiguousarray(array.T)) mm.close() #with mmap.mmap(d.fileno(), size, flags=mmap.MAP_SHARED, prot=mmap.PROT_WRITE) as mm: # mm.write(array.astype(np.complex64).tobytes(order='F')) def writemulticfl(name, arrays): size = 0 dims = [] for array in arrays: size += array.size dims.append(array.shape) with open(name + ".hdr", "wt") as h: h.write('# Dimensions\n') h.write("%d\n" % size) h.write('# SizesDimensions\n') for dim in dims: h.write("%d " % len(dim)) h.write('\n') h.write('# MultiDimensions\n') for dim in dims: for i in dim: h.write("%d " % i) h.write('\n') size = size * np.dtype(np.complex64).itemsize with open(name + ".cfl", "a+b") as d: os.ftruncate(d.fileno(), size) mm = mmap.mmap(d.fileno(), size, flags=mmap.MAP_SHARED, prot=mmap.PROT_WRITE) for array in arrays: if array.dtype != np.complex64: array = array.astype(np.complex64) mm.write(np.ascontiguousarray(array.T)) mm.close() libbart-devel/python/splines_from_svg.py000077500000000000000000000345411472525725500210740ustar00rootroot00000000000000#!/usr/bin/python # -- coding: utf-8 -- """ Copyright 2022. Uecker Lab, University Medical Center Goettingen. Authors: 2022 Martin Schilling (martin.schilling@med.uni-goettingen.de) 2022 Nick Scholand (scholand@tugraz.at) DESCRIPTION : This script takes an SVG file as an input, analyses the paths of objects, which can consist of horizontal, vertical, diagonal and cubic spline transformations, splits these transformations up into cubic Hermite splines and creates a CFL file for use with the BART phantom command line tool. """ import os import numpy as np import argparse import sys if 'BART_TOOLBOX_PATH' in os.environ and os.path.exists(os.environ['BART_TOOLBOX_PATH']): sys.path.insert(0, os.path.join(os.environ['BART_TOOLBOX_PATH'], 'python')) elif 'TOOLBOX_PATH' in os.environ and os.path.exists(os.environ['TOOLBOX_PATH']): sys.path.insert(0, os.path.join(os.environ['TOOLBOX_PATH'], 'python')) else: raise RuntimeError("BART_TOOLBOX_PATH is not set correctly!") import cfl DBLEVEL = 0 def read_svg(svg_input, scale_flag=True): """ Reads a given svg file to extract parameters of paths. :param str svg_input: File path to input svg. :param bool scale_coords: Boolean for scaling coordinates :returns: List of lists for paths. Element: [object_id, color, transforms] :rtype: list """ paths_list = [] prev_key_list, points_list = [], [] readout = False with open (svg_input, 'rt', encoding="utf8", errors='ignore') as input: for line in input: if "" in line: readout = False prev_key_list.append(prev_keys) points_list.append(points) paths_list.append([object_id, color]) input.close() if scale_flag: scale_coords(points_list, center=[0,0], norm=1.8) for num,(k,p) in enumerate(zip(prev_key_list,points_list)): if 0 != len(k): transforms = get_transforms(k,p) paths_list[num].append(transforms) else: paths_list[num].append([]) return paths_list def scale_coords(coords_list, center=[187.5, 125], norm=350): """ Scale coordinates to a given center with a maximal norm. 
:param list coords_list: List of lists of coordinates. Each list belongs to a series of control points. :param list center: Coordinates of new center :param int norm: Maximal value for width and height """ x_min = coords_list[0][0][0] x_max = coords_list[0][0][0] y_min = coords_list[0][0][1] y_max = coords_list[0][0][1] # determine maximal and minimal x- and y-values for cs in coords_list: for c in cs: x_max = c[0] if c[0] > x_max else x_max x_min = c[0] if c[0] < x_min else x_min y_max = c[1] if c[1] > y_max else y_max y_min = c[1] if c[1] < y_min else y_min # transfer values to new center x_trans = (x_max + x_min) / 2 y_trans = (y_max + y_min) / 2 # normalization factor of coordinates as ratio of norm and max([width,height]) norm_factor = norm / max([np.abs(x_max-x_min), np.abs(y_max-y_min)]) for cs in coords_list: for c in cs: c[0] = (c[0] - x_trans) * norm_factor + center[0] c[1] = (c[1] - y_trans) * norm_factor + center[1] def try_float(string): # Function for trying a string for conversion to float. try: f = float(string) return True except ValueError: return False def analyse_d_string(d_string): """ Analyse string of 'd' argument of path. The function returns the signal transform parameters and a list of coordinates of the control points. :param str d_string: Complete string contained in the 'd' parameter of a path. :returns: tuple(transform_keys, coordinates) WHERE list transform_keys is list of lower case signal characters for transformations list coordinates is list of absolute coordinates of control points """ content = d_string.split() prev_key = None points = [] transf_keys = [] cspline = [] count = 0 x_origin, y_origin = 0, 0 for num,section in enumerate(content): if len(section.split(",")) > 1 or try_float(section): # keys before coordinates signal new transformation, # lower case for relative, upper case for absolute coordinates special_keys = ['c','C','m','M', 'l', 'L'] # deal with exception, that 'm'/'M' keys may be followed by diagonal # transformation ('l' key) without explicit key if len(section.split(",")) > 1 and prev_key not in special_keys: prev_key = "l" if "c" == prev_key or "C" == prev_key: count += 1 if 3 == count: cspline.append([cspline[1][0]+cspline[1][0]-cspline[0][0],cspline[1][1]+cspline[1][1]-cspline[0][1]]) if "c" == prev_key: # relative reference point x_origin += float(content[num].split(",")[0]) y_origin += float(content[num].split(",")[1]) if "C" == prev_key: # absolute reference point x_origin = float(content[num].split(",")[0]) y_origin = float(content[num].split(",")[1]) # append intermediate control points points.append([cspline[0][0], cspline[0][1]]) transf_keys.append(prev_key) points.append([cspline[1][0], cspline[1][1]]) transf_keys.append(prev_key) count = 0 cspline = [] else: if "c" == prev_key: cspline.append([x_origin+float(content[num].split(",")[0]), y_origin+float(content[num].split(",")[1])]) if "C" == prev_key: cspline.append([float(content[num].split(",")[0]), float(content[num].split(",")[1])]) else: count = 0 # start of path if "m" == prev_key or "M" == prev_key: x_origin = float(content[num].split(",")[0]) y_origin = float(content[num].split(",")[1]) # horizontal transformation if "h" == prev_key: x_origin += float(content[num]) if "H" == prev_key: x_origin = float(content[num]) # vertical transformation if "v" == prev_key: y_origin += float(content[num]) if "V" == prev_key: y_origin = float(content[num]) # diagonal transformation if "l" == prev_key: x_origin += float(content[num].split(",")[0]) y_origin += 
float(content[num].split(",")[1]) if "L" == prev_key: x_origin = float(content[num].split(",")[0]) y_origin = float(content[num].split(",")[1]) if 0 == count: points.append([x_origin, y_origin]) transf_keys.append(prev_key.lower()) if 'M' == prev_key: prev_key = 'L' if 'm' == prev_key: prev_key = 'l' else: prev_key = section return transf_keys, points def controlpoints2cspline(bezier_points): """ Translate four input control points into a cubic Hermite spline format suitable for BART. :param list bezier_points: List of four control points in format [p1,p2,p3,p4] with p_i=[x_i,y_i] :returns: Parameters for cubic Hermite spline [x_parameters, y_parameters] :rtype: list """ bezier_cspline = [[1,-3,0,0],[0,3,0,0],[0,0,0,-3],[0,0,1,3]] bezier_x = [p[0] for p in bezier_points] bezier_y = [p[1] for p in bezier_points] bezier = [bezier_x, bezier_y] cspline = [[0,0] for i in range(4)] for num, c in enumerate(bezier): for i in range(4): for j in range(4): cspline[i][num] += bezier_cspline[j][i] * bezier[num][j] cspline_x = [p[0] for p in cspline] cspline_y = [p[1] for p in cspline] return [cspline_x, cspline_y] def get_transforms(keys, points): """ Create separate transformations from given lists of keys and coordinates. The transformations have the form [[x_transforms],[y_transforms]] in the cubic Hermite spline format. :param list keys: List of signal characters for path transformations [key1, key2, ...] :param list points: List of coordinates [[x1,y1], [x2,y2], ...] :returns: Transformations in cubic Hermite spline format :rtype: list """ transforms = [] for num,(k,p) in enumerate(zip(keys,points)): if 'h' == k: transforms.append([[points[num-1][0],0.,p[0],-0.],[p[1],0.,p[1],-0.]]) if 'v' == k: transforms.append([[p[0],0.,p[0],-0.],[points[num-1][1],0.,p[1],-0.]]) if 'l' == k: transforms.append([[points[num-1][0],0.,p[0],-0.],[points[num-1][1],0.,p[1],-0.]]) if num+1 < len(points) and 'c' == k: # non-trivial B-spline if 'c' == keys[num-1] and 'c' == keys[num+1]: keys[num+1] = None transforms.append(controlpoints2cspline(points[num-2:num+2])) # trivial B-spline elif 'c' != keys[num-1] and 'c' != keys[num+1]: transforms.append([[points[num-1][0],0.,p[0],-0.],[points[num-1][1],0.,p[1],-0.]]) return transforms def format_transforms(transforms, object_id, filename, output_file): """ Format transforms for insertion into /bart/src/geom/logo.c :param list transform: List of transformations in cubic Hermite spline format :param list object_id: List of object ids for indexing transformations :param str filename: Name of struct :param str output_file: File path to output text file """ total_transforms = sum([len(t) for t in transforms]) with open (output_file, 'w', encoding="utf8", errors='ignore') as output: output.write("//Replace in bart/src/geom/logo.c > bart_logo and adjust bart/src/geom/logo.h\n\n") output.write("const double "+filename+"["+str(total_transforms)+"][2][4] = {\n") for num, transform in enumerate(transforms): output.write("\t//"+str(object_id[num])+"\n") for enum,t in enumerate(transform): x_string = str(t[0][0])+", "+str(t[0][1])+", "+str(t[0][2])+", "+str(t[0][3]) y_string = str(t[1][0])+", "+str(t[1][1])+", "+str(t[1][2])+", "+str(t[1][3]) # current implementation in BART, likely to change to x_string, y_string in the future output.write("\t{ { "+y_string+" }, { "+x_string+" } },\n") output.write("};\n") def transform2polystruct(transforms, id_color, output_file): """ Create a polystruct for a given set of transformations and append it to output file. 
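Each spline segment contributes 11 entries to the generated points array (hence the factor 11 in the sizes below).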
Can replace code in bart/src/simu/phantom.c > calc_bart :param list transforms: List of transformations in [[x_transforms],[y_transforms]] format :param list id_color: List of fill colors of individual objects :param str output_file: File path to output text file """ total_transforms = sum([len(t) for t in transforms]) with open (output_file, 'a', encoding="utf8", errors='ignore') as output: output.write("\tint N = "+str(total_transforms)+";\n") output.write("\tdouble points[N * 11][2];\n") output.write("\n") output.write("\tstruct poly poly = {\n\t\tkspace,\n\t\tcoeff,\n\t\tpopts->large_sens,\n\t\t"+str(len(transforms))+",\n\t\t&(struct poly1[]){\n") array_position = 0 for num, transform in enumerate(transforms): output.write("\t\t\t{ "+str(len(transform)*11)+" , "+str(id_color[num])+", ARRAY_SLICE(points, "+str(array_position*11)+", "+str((array_position+len(transform))*11) +") },\n") array_position += len(transform) output.write("\t\t}\n") output.write("\t};") output.close() def assign_color_id(colors): """ Extract color IDs from hex colors :param list colors: List of strings representing the objects colors :returns: List of Integers representing the objects colors as integer (> 0 !) IDs :rtype: list """ color_values, color_counts = np.unique(colors, return_counts=True) id_color = [list(color_values).index(i)+1 for i in colors] return id_color # Save geometry data in numpy array # coord -> [segment, cp_set:[x,y], cp_coord] with control points (cp) # meta -> [path index, number of segments, color of path] def save2cfl(new_transforms, new_colors, cfl_output): coord = [] meta = [] ind_path = 0 for sub_array in new_transforms: ind_seg = 0 for path in sub_array: path_array = np.array(path) coord.append(path_array) ind_seg += 1 meta.append(np.array([ind_path, ind_seg, new_colors[ind_path]])) ind_path += 1 coord = np.array(coord) meta = np.array(meta) if (2 <= DBLEVEL): print("Coord Dims:") print(np.shape(coord)) print("Meta Dims:") print(np.shape(meta)) print("Meta:") print(meta) cfl.writemulticfl(cfl_output, np.array([coord, meta], dtype=object)) def main(svg_input, text_output, output): """ Extract parameters of paths from SVG file and write code block into txt file, which is suitable for bart/src/simu/shepplogan.c > calc_bart and bart/src/geom/logo.c. :param str svg_input: File path to input SVG file :param str cfl: File path to output cfl file. 
Default: .{cfl,hdr} """ if (text_output): text_filename = output+".txt" path_objects = read_svg(svg_input) object_ids = [obj[0] for obj in path_objects] colors = [obj[1] for obj in path_objects] transforms = [obj[2] for obj in path_objects] # Sort paths by color (=: grey value in provided SVG file) id_color = assign_color_id(colors) new_colors = sorted(id_color) new_ids = [id for color, id in sorted(zip(id_color,object_ids))] new_transforms = [trans for color, trans in sorted(zip(id_color,transforms))] color_values, color_counts = np.unique(new_colors, return_counts=True) if (2 <= DBLEVEL): print("Distribution of colors:") print("Value:\t", color_values) print("Number:\t", color_counts) save2cfl(new_transforms, new_colors, output) if (1 <= DBLEVEL): print("Created files:") print(output+".{cfl,hdr}") if (text_output): format_transforms(new_transforms, new_ids, output, text_filename) transform2polystruct(new_transforms, new_colors, text_filename) if (1 <= DBLEVEL): print(output+".txt") if __name__ == "__main__": parser = argparse.ArgumentParser( description="Script to extract control points of cubic Hermite splines from SVG file to CFL format.") parser.add_argument('input', type=str, help="Input SVG file") parser.add_argument('output', type=str, help="Output CFL filename") parser.add_argument('-d', '--db', default=-1, type=int, help="Specify debug value for additional information [default: 0]") # Internal option for more complicated objects with multi component paths (example: BRAIN geometry) # Requires manual tuning and is therefore hidden for simplicity parser.add_argument('-t', action='store_true', help=argparse.SUPPRESS) args = parser.parse_args() if ("BART_DEBUG_LEVEL" in os.environ): if (-1 != args.db): print("A local BART_DEBUG_LEVEL variable exists! It will be overwritten by -d input!\n") DBLEVEL = int(os.environ["BART_DEBUG_LEVEL"]) elif ("DEBUG_LEVEL" in os.environ): DBLEVEL = int(os.environ["DEBUG_LEVEL"]) if (-1 != args.db): DBLEVEL = args.db main(args.input, args.t, args.output) libbart-devel/python/wslsupport.py000066400000000000000000000006401472525725500177450ustar00rootroot00000000000000import string import os def PathCorrection(inData): outData=inData for i in string.ascii_lowercase: #Replace drive letters with /mnt/ outData=outData.replace(i+':','/mnt/'+i) #if drive letter is supplied in lowercase outData=outData.replace(i.upper()+':','/mnt/'+i) #if drive letter is supplied as uppercase outData=outData.replace(os.path.sep, '/') #Change windows filesep to linux filesep return outData libbart-devel/rules/000077500000000000000000000000001472525725500147425ustar00rootroot00000000000000libbart-devel/rules/add_node_shebang.sh000077500000000000000000000003171472525725500205260ustar00rootroot00000000000000#!/bin/bash for exe in "$@" do if [ ! -f "$exe" ]; then continue fi if head -n 1 "$exe" | grep -q "/usr/bin/env"; then continue fi chmod +x "$exe" sed -i '1s|^|#!/usr/bin/env node\n|' "$exe" done libbart-devel/rules/make_symbol_table.sh000077500000000000000000000001311472525725500207450ustar00rootroot00000000000000#!/bin/bash EXEC=$1 OUT=$2 nm --defined-only ${EXEC} | cut -c11-16,19- | sort > ${OUT} libbart-devel/rules/update_commands.sh000077500000000000000000000005421472525725500204450ustar00rootroot00000000000000#!/bin/bash set -e BINDIR=$(dirname $0) BART_EXE=$1 shift OUTPUT=$1 shift XTARGETS=$@ TEST_FILE_COMMANDS=$(mktemp) echo "AUTOGENERATED. DO NOT EDIT." 
> ${TEST_FILE_COMMANDS} for cmd in ${XTARGETS} ; do printf "\n\n--%s--\n\n" $cmd ; ${BART_EXE} $cmd -h ; done >> ${TEST_FILE_COMMANDS} ${BINDIR}/update_if_changed.sh ${TEST_FILE_COMMANDS} ${OUTPUT} libbart-devel/rules/update_if_changed.sh000077500000000000000000000000561472525725500207130ustar00rootroot00000000000000#!/bin/bash cmp -s $1 $2 || mv $1 $2 rm -f $1 libbart-devel/rules/update_version.sh000077500000000000000000000002041472525725500203240ustar00rootroot00000000000000#!/bin/bash echo 'VERSION('`./git-version.sh`')' > version.new.$$ ./rules/update_if_changed.sh version.new.$$ src/misc/version.inc libbart-devel/save/000077500000000000000000000000001472525725500145465ustar00rootroot00000000000000libbart-devel/save/fftw/000077500000000000000000000000001472525725500155145ustar00rootroot00000000000000libbart-devel/save/fftw/README.txt000066400000000000000000000000221472525725500172040ustar00rootroot00000000000000Saves FFT wisdom. libbart-devel/save/nsv/000077500000000000000000000000001472525725500153545ustar00rootroot00000000000000libbart-devel/save/nsv/README.txt000066400000000000000000000000731472525725500170520ustar00rootroot00000000000000This folder is to save the simulations done by estvar/nsv. libbart-devel/scripts/000077500000000000000000000000001472525725500152775ustar00rootroot00000000000000libbart-devel/scripts/affine_kspace.sh000077500000000000000000000044311472525725500204160ustar00rootroot00000000000000#!/bin/bash # Copyright 2024. TU Graz. Institute of Biomedical Imaging. # Author: Moritz Blumenthal # helpstr=$(cat <<- EOF Transform k-space from a moved image (IM) to a reference image (IR) by an affine transform. The affine coordinate transform is defined such that: IR(xr) = IM(xm) = IM(Axr) = IM(Rxr + a) where A is a 4x4 matrix with (\ | / | | ) A = (- R - | a ) (/ | \ | | ) (0 0 0 | 1 ) The transform needs to be defined following the usual convention: 1.) Shifts are measured in units of FOV as the trajectory measures k-space coordinates in units 1/FOV 2.) The image origin (x=0) is at grid position N//2 (integer division for odd numbers) The transformed k-space is given by F[IR] (R^T k) = exp(i2pi ak) F[IM](k) Warning: A factor 1/|det(R)| is missing for non-rigid transforms! moved k-space: F[IM] moved trajectory: k affine transformation matrix: A reference k-space: F[IR] reference trajectory: R^T k -h help EOF ) usage="Usage: $0 [-h] <ksp_moved> <trj_moved> <affine> <ksp_ref> <trj_ref>" GPU="" BASIS="" while getopts "h" opt; do case $opt in h) echo "$usage" echo echo "$helpstr" exit 0 ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -lt 5 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH/:$PATH" KSP_IN=$(readlink -f "$1") TRJ_IN=$(readlink -f "$2") AFFINE=$(readlink -f "$3") KSP_OUT=$(readlink -f "$4") TRJ_OUT=$(readlink -f "$5") WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR bart extract 0 0 3 1 0 3 $AFFINE rot # rotation of affine transform bart extract 0 0 3 1 3 4 $AFFINE shift # shift of affine transform bart transpose 0 1 rot rott bart transpose 1 3 rott rot READ=$(bart show -d1 $TRJ_IN) PHS1=$(bart show -d2 $TRJ_IN) bart transpose 0 3 $TRJ_IN trjt bart fmac -s8 rot trjt $TRJ_OUT #FIXME: TEST and add scaling by determinant.
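# A possible sketch of the missing 1/|det(R)| density correction (untested; it assumes a `bart determinant` tool operating on the 3x3 rotation block) is commented out below: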
#bart determinant rott det #bart invert det idet #bart fmac idet $KSP_IN ksp bart fovshift -Sshift -t$TRJ_IN $KSP_IN $KSP_OUT libbart-devel/scripts/bart_completion.sh000066400000000000000000000014721472525725500210200ustar00rootroot00000000000000# bart parameter-completion function _bart() { local cur=${COMP_WORDS[$COMP_CWORD]} if [ $COMP_CWORD -eq 1 ] ; then local CMDS=$(bart | tail -n +2) COMPREPLY=($(compgen -W "$CMDS" -- "$cur")); else local bcmd=${COMP_WORDS[1]} case $cur in -*) COMPREPLY=($(bart ${bcmd} -h | grep -o -E "^${cur}\w*")) ;; *) case $bcmd in twixread) COMPREPLY=($(compgen -o plusdirs -f -X '!*.dat' -- ${cur})) ;; *) local CFLS=$(compgen -o plusdirs -f -X '!*.hdr' -- ${cur}) local COOS=$(compgen -o plusdirs -f -X '!*.coo' -- ${cur}); local RAS=$(compgen -o plusdirs -f -X '!*.ra' -- ${cur}); local suffix=".hdr" COMPREPLY=($(for i in ${CFLS} ${COOS} ${RAS}; do echo ${i%$suffix} ; done)) ;; esac ;; esac fi return 0 } complete -o filenames -F _bart bart ./bart libbart-devel/scripts/espirit_econ.sh000066400000000000000000000041121472525725500203140ustar00rootroot00000000000000#!/bin/bash # Copyright 2018. Martin Uecker. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Authors: # 2018 Martin Uecker # # Memory-saving ESPIRiT # set -e LOGFILE=/dev/stdout title=$(cat <<- EOF ESPIRiT-ECON EOF ) helpstr=$(cat <<- EOF -l logfile -h help EOF ) usage="Usage: $0 [-h] " echo "$title" echo while getopts "hl:" opt; do case $opt in h) echo "$usage" echo echo "$helpstr" exit 0 ;; l) LOGFILE=$(readlink -f "$OPTARG") ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -lt 2 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" input=$(readlink -f "$1") output=$(readlink -f "$2") if [ ! -e $input.cfl ] ; then echo "Input file does not exist." >&2 echo "$usage" >&2 exit 1 fi if [ ! -e $TOOLBOX_PATH/bart ] ; then echo "\$TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi #WORKDIR=$(mktemp -d) # Mac: http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR # start group for redirection of output to the logfile { XX=$(bart show -d0 $input) YY=$(bart show -d1 $input) ZZ=$(bart show -d2 $input) DIM=2 # To decouple along another dimension: # 1. change DIM # 2. replace ZZ below # 3. change the ecaltwo command bart ecalib -1 $input eon # zero-pad bart fft $(bart bitmask ${DIM}) eon eon_fft bart resize -c ${DIM} ${ZZ} eon_fft eon_fft2 bart fft -i $(bart bitmask ${DIM}) eon_fft2 eon for i in `seq -w 0 $(($ZZ - 1))` ; do bart slice ${DIM} $i eon sl bart ecaltwo ${XX} ${YY} 1 sl sens-$i.coo done # # join slices back together bart join ${DIM} sens-*.coo $output } > $LOGFILE exit 0 libbart-devel/scripts/grasp.sh000066400000000000000000000130111472525725500167430ustar00rootroot00000000000000#!/bin/bash # Copyright 2015. The Regents of the University of California. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. 
# # Authors: # 2015 Martin Uecker # # Compressed sensing parallel imaging reconstruction with temporal # total-variation regularization for Siemens radial VIBE sequence # with golden-angle sampling (GRASP). # set -e # default settings export SPOKES=21 export SKIP=0 export CALIB=400 export ITER=30 export REG=0.05 SCALE=0.6 LOGFILE=/dev/stdout MAXPROC=4 MAXTHREADS=4 title=$(cat <<- EOF (BART-)GRASP v0.3 (Berkeley Advanced Reconstruction Toolbox) --- EXPERIMENTAL --- FOR RESEARCH USE ONLY --- EOF ) helpstr=$(cat <<- EOF Compressed sensing parallel imaging reconstruction with temporal total-variation regularization for Siemens radial VIBE sequence with golden-angle sampling (GRASP). This script requires the Berkeley Advanced Reconstruction Toolbox version 0.2.09. (later versions may also work). -s spokes number of spokes per frame -r lambda regularization parameter -p maxproc max. number of slices processed in parallel -t maxthreads max. number of threads per slice -l logfile -h help EOF ) usage="Usage: $0 [-h] [-s spokes] [-r lambda] " echo "$title" echo while getopts "hl:s:p:t:r:" opt; do case $opt in s) SPOKES=$OPTARG ;; r) REG=$OPTARG ;; h) echo "$usage" echo echo "$helpstr" exit 0 ;; l) LOGFILE=$(readlink -f "$OPTARG") ;; p) MAXPROC=$OPTARG ;; t) MAXTHREADS=$OPTARG ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -lt 2 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" input=$(readlink -f "$1") output=$(readlink -f "$2") if [ ! -e $input ] ; then echo "Input file does not exist." >&2 echo "$usage" >&2 exit 1 fi if [ ! -e $TOOLBOX_PATH/bart ] ; then echo "\$TOOLBOX_PATH is not set correctly!" 
>&2 exit 1 fi #WORKDIR=$(mktemp -d) # Mac: http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR # start group for redirection of output to the logfile { # read TWIX file bart twixread -A $input grasp export READ=$(bart show -d0 grasp) export COILS=$(bart show -d3 grasp) export PHASES=$(($(bart show -d1 grasp) / $SPOKES)) export OMP_NUM_THREADS=$((MAXPROC * $MAXTHREADS)) # zero-pad #flip $(bitmask 2) grasp grasp2 #resize 2 64 grasp2 grasp #circshift 2 10 grasp grasp2 #fft -u $(bitmask 2) grasp2 grasp_hybrid #rm grasp.* grasp2.* # inverse FFT along 3rd dimension bart fft -i -u $(bart bitmask 2) grasp grasp_hybrid rm grasp.cfl grasp.hdr SLICES=$(bart show -d2 grasp_hybrid) # create trajectory with 400 spokes and 2x oversampling bart traj -G -x$READ -y$CALIB r bart scale $SCALE r rcalib # create trajectory with 2064 spokes and 2x oversampling bart traj -G -x$READ -y$(($SPOKES * $PHASES)) r bart scale $SCALE r r2 # split off time dimension into index 10 bart reshape $(bart bitmask 2 10) $SPOKES $PHASES r2 rfull # number of threads per slice export OMP_NUM_THREADS=$MAXTHREADS calib_slice() { # extract slice bart slice 2 $1 grasp_hybrid grasp1-$1 # extract first $CALIB spokes bart extract 1 $(($SKIP + 0)) $(($SKIP + $CALIB)) grasp1-$1 grasp2-$1 # reshape dimensions bart reshape $(bart bitmask 0 1 2 3) 1 $READ $CALIB $COILS grasp2-$1 grasp3-$1 # apply inverse nufft to first $CALIB spokes bart nufft -i -t rcalib grasp3-$1 img-$1.coo } recon_slice() { # extract sensitivities for slice bart slice 2 $1 sens sens-$1 # extract spokes and split-off time dim bart extract 1 $(($SKIP + 0)) $(($SKIP + $SPOKES * $PHASES)) grasp1-$1 grasp2-$1 bart reshape $(bart bitmask 1 2) $SPOKES $PHASES grasp2-$1 grasp1-$1 # move time dimensions to dim 10 and reshape bart transpose 2 10 grasp1-$1 grasp2-$1 bart reshape $(bart bitmask 0 1 2) 1 $READ $SPOKES grasp2-$1 grasp1-$1 rm grasp2-$1.cfl grasp2-$1.hdr # reconstruction with tv penalty along dimension 10 # old (v0.2.08): # pics -S -d5 -lv -u10. -r$REG -R$(bitmask 10) -i$ITER -t rfull grasp1-$1 sens-$1 i-$1.coo # new (v0.2.09): bart pics -S -d5 -u10. -RT:$(bart bitmask 10):0:$REG -i$ITER -t rfull grasp1-$1 sens-$1 i-$1.coo # clean up temp files rm *-$1.cfl *-$1.hdr } export -f calib_slice export -f recon_slice # loop over slices seq -w 0 $(($SLICES - 1)) | xargs -I {} -P $MAXPROC bash -c "calib_slice {}" # transform back to k-space and compute sensitivities bart join 2 img-*.coo img bart fft -u $(bart bitmask 0 1 2) img ksp #ecalib -S -c0.8 -m1 -r20 ksp sens # transpose because we already support off-center calibration region # in dim 0 but here we might have it in 2 bart transpose 0 2 ksp ksp2 bart ecalib -S -c0.8 -m1 -r20 ksp2 sens2 bart transpose 0 2 sens2 sens # loop over slices seq -w 0 $(($SLICES - 1)) | xargs -I {} -P $MAXPROC bash -c "recon_slice {}" #echo 20 | xargs -i --max-procs=$MAXPROC bash -c "recon_slice {}" # join slices back together bart join 2 i-*.coo $output # generate dicoms #for s in $(seq -w 0 $(($SLICES - 1))) ; do # for p in $(seq -w 0 $(($PHASES - 1))) ; do # bart slice 10 $p i-$s.coo i-$p-$s.coo # bart toimg i-$p-$s.coo $output.series$p.slice$s.dcm # done #done } > $LOGFILE exit 0 libbart-devel/scripts/kspace_precond.sh000077500000000000000000000055421472525725500206240ustar00rootroot00000000000000#!/bin/bash # Copyright 2022. TU Graz. Institute of Biomedical Imaging. # Author: Moritz Blumenthal # # F. Ong, M. 
Uecker and M. Lustig, Accelerating Non-Cartesian # MRI Reconstruction Convergence Using k-Space Preconditioning # IEEE TMI, 2020 39:1646-1654 # set -eu helpstr=$(cat <<- EOF Compute k-space preconditioner P such that ||P^2 AA^H - 1|| is minimal. Note the square in the definition. The preconditioner can be used directly as weights in PICS. <ones> contains ones with image dimensions -B file subspace basis -p file sampling pattern -g use GPU -h help EOF ) usage="Usage: $0 [-h] [-g] [-B <basis>] [-p <pattern>] <ones> <traj> <output>" GPU="" BASIS="" PATTERN="" while getopts "hgB:p:" opt; do case $opt in g) GPU="-g" ;; B) BASIS=$(readlink -f "$OPTARG") ;; p) PATTERN="-p $(readlink -f "$OPTARG")" ;; h) echo "$usage" echo echo "$helpstr" exit 0 ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -lt 3 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" ones=$(readlink -f "$1") traj=$(readlink -f "$2") prec=$(readlink -f "$3") WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR X=$(bart show -d0 $ones) Y=$(bart show -d1 $ones) Z=$(bart show -d2 $ones) s1=$((X*Y*Z)) if [[ 1 != $X ]] ; then X=$((2*X)); fi if [[ 1 != $Y ]] ; then Y=$((2*Y)); fi if [[ 1 != $Z ]] ; then Z=$((2*Z)); fi s2=$((X*Y*Z)) s3=$(echo "$s1*e(-1.5*l($s2))"|bc -l) bart fmac -C -s7 $ones $ones mps_norm2 bart scale $s3 mps_norm2 scale ksp_dims="1" for i in $(seq 15); do ksp_dims+=" $(bart show -d$i $traj)" done LOOPFLAGS=$(bart bitmask 5 6 7 8 9 10 11 12 13 14 15) if [[ "$BASIS" != "" ]] ; then bart fmac -C -s$(bart bitmask 6) $BASIS $BASIS bas_scale bart fmac scale bas_scale scale2 bart copy scale2 scale COE=$(bart show -d6 $BASIS) bart transpose 6 7 $BASIS basis_r bart fmac -C $BASIS basis_r basis_2 bart reshape $(bart bitmask 6 7) $((COE*COE)) 1 basis_2 basis BASIS="-B basis" LOOPFLAGS=$(bart bitmask 7 8 9 10 11 12 13 14 15) fi bart ones 16 $ksp_dims ksp bart scale 2 $traj traj2 bart -l$LOOPFLAGS -r ksp nufft $BASIS $PATTERN -P --lowmem --no-precomp -a $GPU -x$X:$Y:$Z traj2 ksp psf bart resize -c 0 $X 1 $Y 2 $Z $ones ones_os bart fft -u 7 ones_os ones_ksp1 bart fmac -C ones_ksp1 ones_ksp1 ones_ksp bart fft -u -i 7 ones_ksp ones_img bart fmac psf ones_img psf_mul bart -l$LOOPFLAGS -r psf_mul nufft $BASIS $PATTERN -P --lowmem --no-precomp $GPU traj2 psf_mul pre_inv bart creal pre_inv pre_inv_real bart invert pre_inv_real pre_real bart fmac pre_real scale pre_sqr bart spow -- 0.5 pre_sqr $prec libbart-devel/scripts/life.sh000066400000000000000000000023631472525725500165560ustar00rootroot00000000000000#!/bin/bash if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!"
>&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" #bart vec 0 0 1 0 v1 #bart vec 0 0 0 1 v2 #bart vec 0 1 1 1 v3 #bart join 1 v1 v2 v3 v #bart vec 0 0 1 1 v1 #bart vec 0 1 1 0 v2 #bart vec 0 0 1 0 v3 #bart join 1 v1 v2 v3 v #bart vec 0 1 1 1 0 1 v1 #bart vec 0 1 0 0 0 0 v2 #bart vec 0 0 0 0 1 1 v3 #bart vec 0 0 1 1 0 1 v4 #bart vec 0 1 0 1 0 1 v5 #bart join 1 v1 v2 v3 v4 v5 v #bart resize -c 0 300 1 300 v o #bart conway -n3000 o x bart vec 0 0 0 1 1 1 0 0 0 1 1 1 0 0 v0 bart vec 0 0 0 0 0 0 0 0 0 0 0 0 0 0 v1 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v2 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v3 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v4 bart vec 0 0 0 1 1 1 0 0 0 1 1 1 0 0 v5 bart vec 0 0 0 0 0 0 0 0 0 0 0 0 0 0 v6 bart vec 0 0 0 1 1 1 0 0 0 1 1 1 0 0 v7 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v8 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 v9 bart vec 0 1 0 0 0 0 1 0 1 0 0 0 0 1 va bart vec 0 0 0 0 0 0 0 0 0 0 0 0 0 0 vb bart vec 0 0 0 1 1 1 0 0 0 1 1 1 0 0 vc bart join 1 v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 va vb vc v bart resize -c 0 50 1 50 v o bart conway -n3 o x libbart-devel/scripts/octview.m000077500000000000000000000004041472525725500171360ustar00rootroot00000000000000#! /usr/bin/octave -qf addpath(strcat(getenv("BART_TOOLBOX_PATH"), "/matlab")); addpath(strcat(getenv("TOOLBOX_PATH"), "/matlab")); % support old environment variable arg_list = argv(); data = squeeze(readcfl(arg_list{1})); imshow3(abs(data), []); pause; libbart-devel/scripts/phantom.sh000077500000000000000000000140771472525725500173150ustar00rootroot00000000000000#!/bin/bash # Copyright 2022. TU Graz. Institute of Biomedical Imaging. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file. # # Author: # 2022 Nick Scholand # # Creation of digital reference object. set -e LOGFILE=/dev/stdout KSPACE=false SENS=1 ROT_ANGLE=0 ROT_STEPS=1 GEOM=NIST title=$(cat <<- EOF Digital Reference Object EOF ) helpstr=$(cat <<- EOF -S \t\t Diagnostic Sonar geometry (NIST phantom is default) -k \t\t simulate in k-space -a d \t\t angle of rotation -r d \t\t number of rotation steps -s d \t\t number of simulated coils -t \t define custom trajectory file -l \t\t logfile -h \t\t help Please adjust simulation parameters inside the script. EOF ) usage="Usage: $0 [-h] [-k] [-r d] [-s d] [-t ] " echo "$title" echo while getopts "hSka:r:s:t:l:" opt; do case $opt in h) echo "$usage" echo echo -e "$helpstr" exit 0 ;; S) GEOM=SONAR ;; k) KSPACE=true ;; a) ROT_ANGLE=$OPTARG ;; r) ROT_STEPS=$OPTARG ;; s) SENS=$OPTARG ;; t) TRAJ=$(readlink -f "$OPTARG") ;; l) LOGFILE=$(readlink -f "$OPTARG") ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# != 1 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" output=$(readlink -f "$1") # Tests for useful input if [ ! -z "${TRAJ}" ] && [ "$KSPACE" = false ]; then echo "Trajectory only works in k-space domain. Please add [-k]!" 
>&2 exit 1 fi #WORKDIR=$(mktemp -d) # Mac: http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR # start group for redirection of output to the logfile { case $GEOM in NIST) echo "NIST Phantom Geometry" echo "T2 Sphere of Model 130" echo "Relaxation Parameters for 3 T" echo "" ## Relaxation parameters for T2 Sphere of NIST phantom at 3 T (Model 130) ## Stupic, KF, Ainslie, M, Boss, MA, et al. ## A standard system phantom for magnetic resonance imaging. ## Magn Reson Med. 2021; 86: 1194– 1211. https://doi.org/10.1002/mrm.28779 T1=(3 2.48 2.173 1.907 1.604 1.332 1.044 0.802 0.609 0.458 0.337 0.244 0.177 0.127 0.091) T2=(1 0.581 0.404 0.278 0.191 0.133 0.097 0.064 0.046 0.032 0.023 0.016 0.011 0.008 0.006) ;; SONAR) echo "Diagnostic Sonar Phantom Geometry" echo "Eurospin II" echo "Gels: 3, 4, 7, 10, 14, and 16" echo "" ## Relaxation parameters for Diagnostic Sonar phantom ## Eurospin II, gel nos 3, 4, 7, 10, 14, and 16) ## T1 from reference measurements in ## Wang, X., Roeloffs, V., Klosowski, J., Tan, Z., Voit, D., Uecker, M. and Frahm, J. (2018), ## Model-based T1 mapping with sparsity constraints using single-shot inversion-recovery radial FLASH. ## Magn. Reson. Med, 79: 730-740. https://doi.org/10.1002/mrm.26726 ## T2 from ## T. J. Sumpf, A. Petrovic, M. Uecker, F. Knoll and J. Frahm, ## Fast T2 Mapping With Improved Accuracy Using Undersampled Spin-Echo MRI and Model-Based Reconstructions With a Generating Function ## IEEE Transactions on Medical Imaging, vol. 33, no. 12, pp. 2213-2222, Dec. 2014, doi: 10.1109/TMI.2014.2333370. T1=(3 0.311 0.458 0.633 0.805 1.1158 1.441 3) T2=(1 0.046 0.081 0.101 0.132 0.138 0.166 1) ;; *) echo -n "Unknown geometry!\n" exit 1 ;; esac # Simulation Parameters # Run `bart sim --seq h` for more details SEQ=IR-FLASH # Sequence Type TR=0.0034 # Repetition Time [s] TE=0.0021 # Echo Time [s] REP=600 # Number of repetitions IPL=0.01 # Inversion Pulse Length [s] ISP=0.005 # Inversion Spoiler Gradient Length [s] PPL=0 # Preparation Pulse Length [s] TRF=0.001 # Pulse Duration [s] FA=6 # Flip Angle [degree] BWTP=4 # Bandwidth-Time-Product OFF=0 # Off-Resonance [rad/s] SLGRAD=0 # Slice Selection Gradient Strength [T/m] SLTHICK=0 # Thickness of Simulated Slice [m] NSPINS=1 # Number of Simulated Spins # Run Simulation for i in `seq 0 $((${#T1[@]}-1))`; do echo -e "Tube $i\t T1: ${T1[$i]} s,\tT2[$i]: ${T2[$i]} s" bart sim --ODE \ --seq $SEQ,TR=$TR,TE=$TE,Nrep=$REP,ipl=$IPL,isp=$ISP,ppl=$PPL,Trf=$TRF,FA=$FA,BWTP=$BWTP,off=$OFF,sl-grad=$SLGRAD,slice-thickness=$SLTHICK,Nspins=$NSPINS \ -1 ${T1[$i]}:${T1[$i]}:1 -2 ${T2[$i]}:${T2[$i]}:1 \ _simu$(printf "%02d" $i) done # Join individual simulations bart join 7 $(ls _simu*.cfl | sed -e 's/\.cfl//') simu # Join simulations in a single dimension (-> 6) bart reshape $(bart bitmask 6 7) ${#T1[@]} 1 simu simu2 # Create Geometry if [ -z "${TRAJ}" ]; then if $KSPACE; then # Create default trajectory DIM=192 SPOKES=$((DIM-1)) bart traj -x $DIM -y $SPOKES traj bart phantom --${GEOM} -b -s $SENS --rotation-steps $ROT_STEPS --rotation-angle $ROT_ANGLE -t traj geom else bart phantom --${GEOM} -b -s $SENS --rotation-steps $ROT_STEPS --rotation-angle $ROT_ANGLE geom fi else if $KSPACE; then bart phantom --${GEOM} -b -s $SENS --rotation-steps $ROT_STEPS --rotation-angle $ROT_ANGLE -k -t ${TRAJ} geom else bart phantom --${GEOM} -b -s $SENS --rotation-steps $ROT_STEPS --rotation-angle $ROT_ANGLE geom fi fi # Combine 
simulated signal and geometry bart fmac -s $(bart bitmask 6) geom simu2 $output } > $LOGFILE [ -d $WORKDIR ] && rm -rf $WORKDIR exit 0 libbart-devel/scripts/profile.sh000066400000000000000000000024051472525725500172740ustar00rootroot00000000000000#!/bin/sh set -e usage="Usage: $0 <input> <output>" helpstr=$(cat <<- EOF Postprocess debugging output from BART to extract profiling information and to translate pointer values to symbol names. -h help EOF ) while getopts "h" opt; do case $opt in h) echo "$usage" echo echo "$helpstr" exit 0 ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -lt 2 ] ; then echo "$usage" >&2 exit 1 fi in=$(readlink -f "$1") out=$(readlink -f "$2") if [ ! -e "$in" ] ; then echo "Input file does not exist." >&2 echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi #WORKDIR=$(mktemp -d) # Mac: http://unix.stackexchange.com/questions/30091/fix-or-alternative-for-mktemp-in-os-x WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR nm --defined-only "$BART_TOOLBOX_PATH"/bart | cut -c11-16,19- | sort > bart.syms cat $in | grep "^TRACE" \ | grep " 0x" \ | cut -c7-23,25-31,34- \ | sort -k3 \ | join -11 -23 bart.syms - \ | cut -c8- \ | sort -k2 > $out libbart-devel/scripts/radial_dcf.sh000066400000000000000000000021641472525725500177060ustar00rootroot00000000000000#!/bin/bash if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" # oversampled radial trajectory bart traj -r -y55 -x256 traj_tmp bart scale 0.5 traj_tmp traj # simulate k-space bart phantom -t traj ksp # compute Ram-Lak filter bart rss 1 traj ramlak # apply to data bart fmac ksp ramlak ksp_filt # adjoint nufft bart nufft -a traj ksp img bart nufft -a traj ksp_filt img_filt # grid and degrid ones bart ones 3 1 256 55 ones bart nufft -a traj ones dens_tmp bart nufft traj dens_tmp density # inverse bart spow -- -1. density dcf # inv sqrt bart spow -- -0.5 density sqdcf # adjoint nufft bart fmac dcf ksp ksp_filt2 bart nufft -a traj ksp_filt2 img_filt2 # one channel all ones sensitivity bart ones 3 256 256 1 sens # without dcf bart pics -i30 -t traj ksp sens img_pics_i30 bart pics -i3 -t traj ksp sens img_pics_i3 # with dcf bart pics -i30 -t traj -p sqdcf ksp sens img_pics_dcf_i30 bart pics -i3 -t traj -p sqdcf ksp sens img_pics_dcf_i3 libbart-devel/scripts/rovir.sh000077500000000000000000000046121472525725500170020ustar00rootroot00000000000000#!/bin/bash # Copyright 2023. TU Graz. Institute of Biomedical Imaging. # Author: Moritz Blumenthal # # Kim, D, Cauley, SF, Nayak, KS, Leahy, RM, Haldar, JP. # Region-optimized virtual (ROVir) coils: Localization and/or # suppression of spatial regions using sensor-domain beamforming. # Magn Reson Med. 2021; 86: 197–212. # set -eu helpstr=$(cat <<- EOF Compute coil compression following the ROVir method. <sig> Signal to be compressed <pos> Mask (1/0) for region of interest to be optimized for. Defines also low resolution image.
<out> Compressed signal or coefficient matrix -p N compress to N virtual channels -t file trajectory -B file subspace basis -M output coefficients -g use GPU -h help EOF ) usage="Usage: $0 [-h] [-g] [-M] [-p N] [-t <traj>] [-B <basis>] <sig> <pos> <neg> <out>" CC="" GPU="" TRAJ="" BASIS="" COEFFS=0 while getopts "hgB:t:Mp:" opt; do case $opt in g) GPU=" -g" ;; t) TRAJ=$(readlink -f "$OPTARG") ;; p) CC=" -p $OPTARG" ;; M) COEFFS=1 ;; B) BASIS="-B $(readlink -f "$OPTARG")" ;; h) echo "$usage" echo echo "$helpstr" exit 0 ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [ $# -ne 4 ] ; then echo "$usage" >&2 exit 1 fi if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" sig=$(readlink -f "$1") pos=$(readlink -f "$2") neg=$(readlink -f "$3") out=$(readlink -f "$4") WORKDIR=`mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir'` trap 'rm -rf "$WORKDIR"' EXIT cd $WORKDIR if [ -z "$TRAJ" ] ; then DIMS="0 $(bart show -d 0 $pos) 1 $(bart show -d 1 $pos) 2 $(bart show -d 2 $pos)" bart resize -c $DIMS $sig res bart fft -i 7 res img bart fmac img $pos pos bart fmac img $neg neg else DIMS="$(bart show -d 0 $pos):$(bart show -d 1 $pos):$(bart show -d 2 $pos)" bart nufftbase $DIMS $TRAJ pat bart nufft $BASIS -p pat -i -x$DIMS $TRAJ $sig cim bart fmac cim $pos ipos bart fmac cim $neg ineg bart nufft $BASIS -ppat $TRAJ ipos pos bart nufft $BASIS -ppat $TRAJ ineg neg fi bart rovir pos neg compress if [[ "$COEFFS" -eq 1 ]]; then bart copy compress $out else bart ccapply $CC $sig compress $out fi libbart-devel/scripts/rtnlinv.m000066400000000000000000000026441472525725500171570ustar00rootroot00000000000000% 2015, Martin Uecker % % Example script to use BART for the initial preprocessing % (gridding) which is required - but not included - in the % original Matlab RT-NLINV example. The example is for a % single frame, but this should also work in a similar way % for the RT-NLINV2 code which reconstructs a time-series % of images from highly undersampled data using temporal % regularization. % % Links to the Matlab code can be found here: % http://www.eecs.berkeley.edu/~uecker/toolbox.html % % References: % % Uecker M et al., Nonlinear Inverse Reconstruction for Real-time MRI % of the Human Heart Using Undersampled Radial FLASH, % MRM 63:1456-1462 (2010) % % Uecker M et al., Real-time magnetic resonance imaging at 20 ms % resolution, NMR in Biomedicine 23: 986-994 (2010) % % data set is included in the IRGNTV example A = load('radial_cardiac_25_projections.mat'); % re-format trajectory for BART t = zeros(3, 256, 25); t(1,:,:) = real(A.k) * 384.; t(2,:,:) = imag(A.k) * 384.; % use adjoint nufft to interpolate data onto Cartesian grid adj = bart('nufft -d384:384:1 -a ', t, reshape(A.rawdata, [1 256 25 12])); % compute point-spread function psf = bart('nufft -d384:384:1 -a ', t, ones(1, 256, 25)); % transform back to k-space adjk = bart('fft -u 7', adj); psfk = bart('fft -u 7', psf); % use nlinv from RT-NLINV (nlinv2) matlab package R = nlinv(squeeze(adjk), squeeze(psfk) * 1., 9, 'noncart'); libbart-devel/scripts/rtreco.sh000077500000000000000000000275041472525725500171430ustar00rootroot00000000000000#!/bin/bash # Copyright 2024. Institute of Biomedical Imaging. TU Graz. # All rights reserved. Use of this source code is governed by # a BSD-style license which can be found in the LICENSE file.
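#
# This script chains BART tools through named pipes (FIFOs). A minimal
# sketch of the recurring idiom used by the functions below (file names
# here are placeholders): metadata is tapped off with "bart tee --out0"
# while the data itself keeps streaming:
#
#	mkfifo meta.fifo out.fifo
#	cat "$KSP" | bart tee --out0 meta.fifo | bart copy --stream 1024 -- - out.fifo &
#	DIM=$(bart show -d10 meta.fifo)	# dimensions arrive on the side channel
#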
# # Authors: # 2024 Moritz Blumenthal # 2024 Philip Schaten set -e LOGFILE=/dev/stderr title=$(cat <<- EOF Real-Time Reconstruction EOF ) helpstr=$(cat <<- EOF -h help -l logfile -t #turns -f median filter -R ROVIR -G Real-time geometric decomposition coil compression -S Static Coil Compression matrix estimated from first frame. EOF ) usage="Usage: $0 [-h,l,t,f] [(-R|-G)] []" TURNS=5 ROVIR=false GEOM=false FILTER=false STATIC_COILS=false OVERGRIDDING=1.5 DELAY=2 CHANNELS=8 export TMPDIR=/dev/shm/ while getopts "hl:t:fRTp:SG" opt; do case $opt in h) echo "$usage" echo echo "$helpstr" exit 0 ;; l) LOGFILE=$(readlink -f "$OPTARG") ;; t) TURNS=$OPTARG ;; f) FILTER=true ;; T) TIME=-t ;; R) ROVIR=true ;; S) STATIC_COILS=true ;; p) CHANNELS="$OPTARG" ;; G) GEOM=true ;; \?) echo "$usage" >&2 exit 1 ;; esac done shift $((OPTIND - 1)) export ROVIR export TURNS export DELAY export CHANNELS export STATIC_COILS export GEOM export BART_DEBUG_STREAM=1 if [ $# -lt 2 ] ; then echo "$usage" >&2 exit 1 fi if [ $# -gt 3 ] ; then echo "$usage" >&2 exit 1 fi echo "$title" >>$LOGFILE echo >>$LOGFILE if [ ! -e "$BART_TOOLBOX_PATH"/bart ] ; then if [ -e "$TOOLBOX_PATH"/bart ] ; then BART_TOOLBOX_PATH="$TOOLBOX_PATH" else echo "\$BART_TOOLBOX_PATH is not set correctly!" >&2 exit 1 fi fi export PATH="$BART_TOOLBOX_PATH:$PATH" if [ "-" = "$1" ]; then KSP=- else KSP=$(readlink -f "$1") fi if [ "-" = "$2" ]; then REC=- else REC=$(readlink -f "$2") fi if [ $# -eq 3 ]; then COILS=$(readlink -f "$3") fi delay () ( #delays input by prepending the first frame START times and cropps the last END frames DIM=$1 START=$2 END=$3 SRC=$(readlink -f $4) DST=$(readlink -f $5) WORKDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') trap 'rm -rf "$WORKDIR"' EXIT cd "$WORKDIR" || exit mkfifo first.fifo mkfifo end1.fifo mkfifo end2.fifo mkfifo meta.fifo cat $SRC | bart tee --out0 meta.fifo -n first.fifo end1.fifo & TOT=$(bart show -d $DIM meta.fifo) END=$((TOT-END)) bart -l$(bart bitmask $DIM) -e$END copy end1.fifo end2.fifo & bart -l$(bart bitmask $DIM) -e$START copy -- first.fifo - | \ bart join -s -- $DIM - end2.fifo $DST ) filter () ( #temporal median filter with filter size WIN WIN=$1 SRC=$(readlink -f $2) DST=$(readlink -f $3) WORKDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') trap 'rm -rf "$WORKDIR"' EXIT cd "$WORKDIR" || exit FIFOS="" for i in $(seq $WIN) ; do mkfifo fil_0_$i.fifo mkfifo fil_1_$i.fifo SLIC+=" fil_0_$i.fifo" JOIN+=" fil_1_$i.fifo" done mkfifo meta.fifo mkfifo delay.fifo delay 10 $((WIN-1)) 0 $SRC delay.fifo & bart copy --stream 1024 -- delay.fifo - | bart tee --out0 meta.fifo | bart tee bart $SLIC > /dev/null & TOT=$(bart show -d10 meta.fifo) for i in $(seq $WIN) ; do bart -l1024 -s $((i-1)) -e $((TOT-WIN+i)) flip 0 fil_0_$i.fifo fil_1_$i.fifo & done bart -r fil_1_1.fifo join -- 11 $JOIN - | \ bart -r - filter -m11 -l5 -- - $DST ) trajectory () ( # generate trajectory and correct gradient delays with ring # gradient delays are taken from previous turns shifted by DELAY KSP=$(readlink -f $1) DST=$(readlink -f $2) WORKDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') trap 'rm -rf "$WORKDIR"' EXIT cd "$WORKDIR" || exit mkfifo ksp_tmp.fifo mkfifo meta0.fifo mkfifo meta1.fifo mkfifo meta2.fifo cat $KSP | \ bart tee --out0 meta0.fifo | \ bart tee --out0 meta1.fifo | \ bart tee --out0 meta2.fifo | \ bart copy --stream 1024 -- - ksp_tmp.fifo & #FIXME DEADLOCK: #> ksp_tmp.fifo & READ=$(($(bart show -d 1 meta0.fifo)/2)) PHS1=$(bart show -d 2 meta1.fifo) TOT=$(bart show -d 10 meta2.fifo) topts=(-o2 -r -D 
-l -x"$READ" -y"$PHS1" -t"$TURNS" -O) bart traj "${topts[@]}" trj_tmp bart reshape -- $(bart bitmask 2 10) $((PHS1*TURNS)) 1 trj_tmp trj_gd bart zeros 1 3 init mkfifo predelay.fifo mkfifo postdelay.fifo bart reshape -s 2048 -- $(bart bitmask 2 10 11) $((PHS1*TURNS)) 1 $((TOT/TURNS)) ksp_tmp.fifo - | \ bart -t4 -r - estdelay -p10 -R -r2 -- trj_gd - predelay.fifo | \ delay 11 $DELAY $DELAY predelay.fifo postdelay.fifo & bart -t4 -r postdelay.fifo traj "${topts[@]}" -V postdelay.fifo -- - | \ bart reshape -s 1024 -- $(bart bitmask 2 10 11) $PHS1 $TOT 1 - $DST ) coilcompression_svd () ( # SVD based coil compression # TRJ is void but provided for easy replacement with rovir KSP=$(readlink -f $1) TRJ=$(readlink -f $2) DST=$(readlink -f $3) WORKDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') trap 'rm -rf "$WORKDIR"' EXIT cd "$WORKDIR" || exit mkfifo ksp_tmp.fifo mkfifo meta0.fifo mkfifo meta1.fifo mkfifo predelay.fifo mkfifo cc.fifo mkfifo tmp.fifo cat $KSP | \ bart tee --out0 meta0.fifo | \ bart tee --out0 meta1.fifo | \ bart copy --stream 1024 -- - ksp_tmp.fifo & PHS=$(bart show -d 2 meta0.fifo) TOT=$(bart show -d 10 meta1.fifo) cat $TRJ > /dev/null & cat ksp_tmp.fifo | \ bart tee tmp.fifo | \ bart reshape -s1024 -- $(bart bitmask 2 10) $((PHS*TURNS)) $((TOT/TURNS)) - - | \ bart -r - cc -M -- - predelay.fifo & delay 10 $DELAY $DELAY predelay.fifo cc.fifo & bart -r cc.fifo repmat -- 9 $TURNS cc.fifo - | \ bart reshape -s1024 -- $(bart bitmask 9 10) 1 $TOT - - | \ bart -r - ccapply -p$CHANNELS -- tmp.fifo - $DST ) coilcompression_svd_first () ( # SVD based coil compression # TRJ is void but provided for easy replacement with rovir KSP=$(readlink -f $1) TRJ=$(readlink -f $2) DST=$(readlink -f $3) WORKDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') trap 'rm -rf "$WORKDIR"' EXIT cd "$WORKDIR" || exit mkfifo ksp_tmp.fifo mkfifo meta0.fifo mkfifo meta1.fifo mkfifo predelay.fifo mkfifo cc.fifo mkfifo cc2.fifo mkfifo tmp.fifo mkfifo ccmat.fifo cat $KSP | \ bart tee --out0 meta0.fifo | \ bart tee --out0 meta1.fifo | \ bart copy --stream 1024 -- - ksp_tmp.fifo & PHS=$(bart show -d 2 meta0.fifo) TOT=$(bart show -d 10 meta1.fifo) cat $TRJ > /dev/null & cat ksp_tmp.fifo | \ bart tee tmp.fifo | \ bart reshape -s1024 -- $(bart bitmask 2 10) $((PHS*TURNS)) $((TOT/TURNS)) - - | \ bart -r - cc -M -- - - | bart tee -n cc.fifo & bart -l 1024 copy -- cc.fifo cc2 bart -r tmp.fifo ccapply -p$CHANNELS -- tmp.fifo cc2 $DST ) coilcompression_rovir () ( # ROVir based coil compression KSP=$(readlink -f $1) TRJ=$(readlink -f $2) DST=$(readlink -f $3) WORKDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') trap 'rm -rf "$WORKDIR"' EXIT cd "$WORKDIR" || exit mkfifo ksp_tmp.fifo mkfifo meta0.fifo mkfifo meta1.fifo mkfifo meta2.fifo cat $KSP | \ bart tee --out0 meta0.fifo | \ bart tee --out0 meta1.fifo | \ bart tee --out0 meta2.fifo | \ bart copy --stream 1024 -- - ksp_tmp.fifo & READ=$(($(bart show -d 1 meta0.fifo)/2)) PHS=$(bart show -d 2 meta1.fifo) TOT=$(bart show -d 10 meta2.fifo) bart ones 2 20 20 o bart resize -c 0 40 1 40 o pos bart ones 2 25 25 o bart resize -c 0 40 1 40 o t bart ones 2 40 40 o bart saxpy -- -1 t o neg topts=(-o2 -r -D -l -x"$READ" -y"$PHS" -t"$TURNS" -O) bart traj "${topts[@]}" -- - | bart reshape -- $(bart bitmask 2 10) $((TURNS*PHS)) 1 - trj bart scale 2 trj trjos DIMS=40:40:1 bart nufftbase $DIMS trjos pat mkfifo ksp_rovir.fifo mkfifo trj_rovir1.fifo mkfifo trj_rovir2.fifo mkfifo cim1.fifo mkfifo cim2.fifo mkfifo ipos.fifo mkfifo ineg.fifo mkfifo pos.fifo mkfifo neg.fifo mkfifo 
ksp.fifo mkfifo cc.fifo mkfifo cc_init.fifo mkfifo ksp_cc.fifo mkfifo predelay.fifo cat ksp_tmp.fifo | \ bart tee tmp.fifo | \ bart reshape -s1024 -- $(bart bitmask 2 10) $((PHS*TURNS)) $((TOT/TURNS)) - ksp_rovir.fifo & cat $TRJ | \ bart -t4 -r - scale -- 2 - - | \ bart reshape -s1024 -- $(bart bitmask 2 10) $((PHS*TURNS)) $((TOT/TURNS)) - - | \ bart tee trj_rovir1.fifo trj_rovir2.fifo | \ bart -r - nufft -g -p pat -i -x$DIMS -- - ksp_rovir.fifo - | \ bart tee cim1.fifo > cim2.fifo & bart -r cim1.fifo fmac -- cim1.fifo pos ipos.fifo & bart -r cim2.fifo fmac -- cim2.fifo neg ineg.fifo & bart -t4 -r trj_rovir1.fifo nufft -p pat -- trj_rovir1.fifo ipos.fifo pos.fifo & bart -t4 -r trj_rovir2.fifo nufft -p pat -- trj_rovir2.fifo ineg.fifo neg.fifo & bart -t4 -r pos.fifo rovir -- pos.fifo neg.fifo predelay.fifo & delay 10 $DELAY $DELAY predelay.fifo cc.fifo & bart -r cc.fifo repmat -- 9 $TURNS cc.fifo - | \ bart reshape -s1024 -- $(bart bitmask 9 10) 1 $TOT - - | \ bart -r - ccapply -p$CHANNELS -- tmp.fifo - $DST ) coilcompression_geom () ( # SVD Coil Compression with alignment along time dim. # TRJ is void but provided for easy replacement with rovir KSP=$(readlink -f $1) TRJ=$(readlink -f $2) DST=$(readlink -f $3) WORKDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') trap 'rm -rf "$WORKDIR"' EXIT cd "$WORKDIR" || exit mkfifo tmp.fifo mkfifo ksp_tmp.fifo mkfifo meta0.fifo mkfifo meta1.fifo mkfifo predelay.fifo mkfifo cc.fifo cat $KSP | \ bart tee --out0 meta0.fifo | \ bart tee --out0 meta1.fifo | \ bart copy --stream 1024 -- - ksp_tmp.fifo & PHS=$(bart show -d 2 meta0.fifo) TOT=$(bart show -d 10 meta1.fifo) cat $TRJ > /dev/null & cat ksp_tmp.fifo | \ bart tee tmp.fifo | \ bart reshape -s1024 -- $(bart bitmask 2 10) $((PHS*TURNS)) $((TOT/TURNS)) - - | \ bart -r - cc -M -- - predelay.fifo & delay 10 $DELAY $DELAY predelay.fifo cc.fifo & bart -r cc.fifo repmat -- 9 $TURNS cc.fifo - | \ bart reshape -s1024 -- $(bart bitmask 9 10) 1 $TOT - - | \ bart ccapply -A10 -p$CHANNELS -- tmp.fifo - $DST ) WORKDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir') trap 'rm -rf "$WORKDIR"; kill $(jobs -p) || true' EXIT cd "$WORKDIR" || exit { mkfifo ksp.fifo mkfifo meta0.fifo mkfifo meta1.fifo bart -r $KSP copy -- $KSP - | bart tee --out0 meta0.fifo | bart tee --out0 meta1.fifo -n ksp.fifo & echo "WORKING_DIR: $WORKDIR" >> $LOGFILE echo "k-Space: $KSP" >> $LOGFILE echo "Reconstruction: $REC" >> $LOGFILE READ=$(bart show -d 1 meta0.fifo) PHS1=$(bart show -d 2 meta1.fifo) RDIMS=$((READ/2)) GDIMS=$(echo "scale=0;($RDIMS*$OVERGRIDDING+0.5)/1" | bc -l) bart ones 3 1 $READ $PHS1 pat mkfifo reco.fifo mkfifo ksp_reco.fifo mkfifo trj_reco.fifo mkfifo ksp_gd.fifo mkfifo trj.fifo trajectory ksp_gd.fifo trj.fifo & mkfifo ksp_cc.fifo mkfifo trj_cc.fifo if $ROVIR ; then coilcompression_rovir ksp_cc.fifo trj_cc.fifo ksp_reco.fifo & elif $STATIC_COILS ; then coilcompression_svd_first ksp_cc.fifo trj_cc.fifo ksp_reco.fifo & elif $GEOM; then coilcompression_geom ksp_cc.fifo trj_cc.fifo ksp_reco.fifo & else coilcompression_svd ksp_cc.fifo trj_cc.fifo ksp_reco.fifo & fi cat trj.fifo | bart tee trj_cc.fifo | bart -r - scale -- $OVERGRIDDING - trj_reco.fifo & cat ksp.fifo | bart tee -n ksp_gd.fifo ksp_cc.fifo & bart nlinv --cgiter=10 -S --real-time --fast -g --sens-os=1.25 -i6 -x$GDIMS:$GDIMS:1 -ppat -t trj_reco.fifo -- ksp_reco.fifo - $COILS | \ bart tee $TIME | \ bart -r - flip -- 3 - - | \ bart -r - resize -c -- 0 $RDIMS 1 $RDIMS - reco.fifo & if $FILTER ; then mkfifo reco_fil.fifo filter 5 reco.fifo reco_fil.fifo & bart 
-r reco_fil.fifo copy -- reco_fil.fifo $REC & else bart -r reco.fifo copy -- reco.fifo $REC & fi } 2>>$LOGFILE wait if [ -f "$COILS.hdr" ]; then bart flip -- 3 $COILS tmp_coils bart resize -c -- 0 $RDIMS 1 $RDIMS tmp_coils $COILS; fi libbart-devel/src/000077500000000000000000000000001472525725500143775ustar00rootroot00000000000000libbart-devel/src/affinereg.c000066400000000000000000000077551472525725500165070ustar00rootroot00000000000000/* Copyright 2024. TU Graz. Institute of Biomedical Imaging. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: Moritz Blumenthal * * References: * * Parzen E. On the estimation of a probability density * function and the mode. Annals of Mathematical Statistics * 1962;33:1065-1076. * * Mattes D, Haynor DR, Vesselle H, Lewellen TK, Eubank W. * PET-CT image registration in the chest using free-form deformations. * IEEE TMI 2023;22:120-8. */ #include #include #include #include #include #include "num/multind.h" #include "num/init.h" #include "num/flpmath.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/opts.h" #include "misc/debug.h" #include "nlops/nlop.h" #include "motion/affine.h" #ifndef DIMS #define DIMS 16 #endif enum AFFINE_TYPE { AFFINE_TRANS, AFFINE_RIGID, AFFINE_ALL }; static const char help_str[] = "Affine registration of reference of and ."; int main_affinereg(int argc, char* argv[argc]) { const char* ref_file = NULL; const char* affine_file = NULL; const char* mov_file = NULL; struct arg_s args[] = { ARG_INFILE(true, &ref_file, "reference"), ARG_INFILE(true, &mov_file, "moved"), ARG_OUTFILE(true, &affine_file, "affine"), }; enum AFFINE_TYPE aff = AFFINE_RIGID; float factors[5] = { 1., 0.5, 0.25, 0.125, 0.0625 }; float sigmas[5] = { 0., 2., 4., 8., 16. }; const char* msk_mov_file = NULL; const char* msk_ref_file = NULL; const struct opt_s opts[] = { OPT_SET('g', &bart_use_gpu, "use gpu (if available)"), OPTL_INFILE(0, "mask-reference", &msk_ref_file, "file", "binary mask for the reference image"), OPTL_INFILE(0, "mask-moved", &msk_mov_file, "file", "binary mask for the moved image"), OPT_SELECT('T', enum AFFINE_TYPE, &aff, AFFINE_TRANS, "Translation"), OPT_SELECT('R', enum AFFINE_TYPE, &aff, AFFINE_RIGID, "Rigid transformation (default)"), OPT_SELECT('A', enum AFFINE_TYPE, &aff, AFFINE_ALL, "All degrees of freedom"), }; cmdline(&argc, argv, ARRAY_SIZE(args), args, help_str, ARRAY_SIZE(opts), opts); num_init_gpu_support(); long rdims[DIMS]; long mdims[DIMS]; complex float* ref_ptr = load_cfl(ref_file, DIMS, rdims); complex float* mov_ptr = load_cfl(mov_file, DIMS, mdims); if ( (0 != (~7ul & md_nontriv_dims(DIMS, rdims))) || (0 != (~7ul & md_nontriv_dims(DIMS, mdims)))) error("Affine registration only supports the first three dimensions.\nUse bart looping for higher dimensions.\n"); md_zabs(DIMS, mdims, mov_ptr, mov_ptr); md_zabs(DIMS, rdims, ref_ptr, ref_ptr); if ((NULL == msk_mov_file) != (NULL == msk_ref_file)) error("Need both masks or none.\n"); complex float* msk_ref_ptr = NULL; complex float* msk_mov_ptr = NULL; if (NULL != msk_mov_file) { long tdims[DIMS]; msk_mov_ptr = load_cfl(msk_mov_file, DIMS, tdims); assert(md_check_equal_dims(DIMS, mdims, tdims, ~0ul)); msk_ref_ptr = load_cfl(msk_ref_file, DIMS, tdims); assert(md_check_equal_dims(DIMS, rdims, tdims, ~0ul)); } const struct nlop_s* trafo; switch (aff) { case AFFINE_ALL: trafo = (1 == rdims[2]) ? nlop_affine_2D() : nlop_affine_3D(); break; case AFFINE_RIGID: trafo = (1 == rdims[2]) ? 
nlop_affine_rigid_2D() : nlop_affine_rigid_3D(); break; case AFFINE_TRANS: trafo = (1 == rdims[2]) ? nlop_affine_translation_2D() : nlop_affine_translation_3D(); break; default: unreachable(); } long aff_dims[DIMS] = { 3, 4, [ 2 ... DIMS - 1 ] = 1 }; complex float* affine = create_cfl(affine_file, DIMS, aff_dims); affine_init_id(affine); affine_reg(bart_use_gpu, false, affine, trafo, rdims, ref_ptr, msk_ref_ptr, mdims, mov_ptr, msk_mov_ptr, 3, sigmas, factors); nlop_free(trafo); affine_debug(DP_INFO, affine); unmap_cfl(DIMS, mdims, mov_ptr); unmap_cfl(DIMS, rdims, ref_ptr); unmap_cfl(DIMS, aff_dims, affine); if (NULL != msk_mov_file) { unmap_cfl(DIMS, mdims, msk_mov_ptr); unmap_cfl(DIMS, rdims, msk_ref_ptr); } return 0; } libbart-devel/src/avg.c000066400000000000000000000025751472525725500153310ustar00rootroot00000000000000/* Copyright 2014-2016. The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2014 Frank Ong */ #include #include "num/multind.h" #include "num/flpmath.h" #include "num/init.h" #include "misc/mmio.h" #include "misc/opts.h" #ifndef DIMS #define DIMS 16 #endif static const char help_str[] = "Calculates (weighted) average along dimensions specified by bitmask."; int main_avg(int argc, char* argv[argc]) { unsigned long flags = 0; const char* in_file = NULL; const char* out_file = NULL; struct arg_s args[] = { ARG_ULONG(true, &flags, "bitmask"), ARG_INFILE(true, &in_file, "input"), ARG_OUTFILE(true, &out_file, "output"), }; bool wavg = false; const struct opt_s opts[] = { OPT_SET('w', &wavg, "weighted average"), }; cmdline(&argc, argv, ARRAY_SIZE(args), args, help_str, ARRAY_SIZE(opts), opts); num_init(); int N = DIMS; long idims[N]; complex float* data = load_cfl(in_file, N, idims); long odims[N]; md_select_dims(N, ~flags, odims, idims); complex float* out = create_cfl(out_file, N, odims); (wavg ? md_zwavg : md_zavg)(N, idims, flags, out, data); unmap_cfl(N, idims, data); unmap_cfl(N, odims, out); return 0; } libbart-devel/src/bart.c000066400000000000000000000264741472525725500155100ustar00rootroot00000000000000/* Copyright 2015. The Regents of the University of California. * Copyright 2015-2021. Martin Uecker. * Copyright 2018. Damien Nguyen. * Copyright 2023-2024. Institute of Biomedical Imaging. TU Graz. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. 
*/ #include #include #include #include #include #include #include #include #include #ifdef _WIN32 #include "win/fmemopen.h" #include "win/basename_patch.h" #endif #include "misc/io.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/opts.h" #include "misc/version.h" #include "misc/debug.h" #include "misc/cppmap.h" #include "misc/stream.h" #include "num/mpi_ops.h" #include "num/multind.h" #include "num/rand.h" #ifdef USE_MPI #include #endif #ifdef _OPENMP #include #endif #ifdef USE_CUDA #include "num/gpuops.h" #endif #ifdef USE_LOCAL_FFTW #include "fftw3_local.h" #define MANGLE(name) local_ ## name #else #include #define MANGLE(name) name #endif #include "main.h" // also check in commands/ subdir at the bart exe location #define CHECK_EXE_COMMANDS #ifndef DIMS #define DIMS 16 #endif extern FILE* bart_output; // src/misc.c static void bart_exit_cleanup(void) { if (NULL != command_line) XFREE(command_line); io_memory_cleanup(); opt_free_strdup(); stream_unmap_all(); #ifdef FFTWTHREADS MANGLE(fftwf_cleanup_threads)(); #endif #ifdef USE_CUDA cuda_memcache_clear(); #endif #ifdef __EMSCRIPTEN__ wasm_close_fds(); #endif } typedef int (main_fun_t)(int argc, char* argv[]); struct { main_fun_t* main_fun; const char* name; } dispatch_table[] = { #define DENTRY(x) { main_ ## x, # x }, MAP(DENTRY, MAIN_LIST) #undef DENTRY { NULL, NULL } }; static const char help_str[] = "BART. command line flags"; static void usage(void) { printf("BART. Available commands are:"); for (int i = 0; NULL != dispatch_table[i].name; i++) { if (0 == i % 6) printf("\n"); printf("%-12s", dispatch_table[i].name); } printf("\n"); } static int bart_exit(int err_no, const char* exit_msg) { if (0 != err_no) { if (NULL != exit_msg) debug_printf(DP_ERROR, "%s\n", exit_msg); #ifdef USE_MPI MPI_Abort(MPI_COMM_WORLD, err_no); #endif } return err_no; } static void parse_bart_opts(int* argcp, char*** argvp) { int omp_threads = 1; unsigned long flags = 0; unsigned long pflags = 0; long param_start[DIMS] = { [0 ... DIMS - 1] = -1 }; long param_end[DIMS] = { [0 ... 
DIMS - 1] = -1 }; const char* ref_file = NULL; bool use_mpi = false; bool version = false; bool attach = false; struct arg_s args[] = { }; struct opt_s opts[] = { OPTL_ULONG('l', "loop", &(flags), "flag", "Flag to specify dimensions for looping"), OPTL_ULONG('p', "parallel-loop", &(pflags), "flag", "Flag to specify dimensions for looping and activate parallelization"), OPTL_VECN('s', "start", param_start, "Start index of range for looping (default: 0)"), OPTL_VECN('e', "end", param_end, "End index of range for looping (default: start + 1)"), OPTL_INT('t', "threads", &omp_threads, "nthreads", "Set threads for parallelization"), OPTL_INFILE('r', "ref-file", &ref_file, "", "Obtain loop size from reference file/stream"), OPTL_SET('M', "mpi", &use_mpi, "Initialize MPI"), OPT_SET('S', &mpi_shared_files, "Maps files from each rank (requires shared files system)"), OPTL_SET(0, "version", &version, "print version"), OPTL_ULONG(0, "random-dims", &cfl_loop_rand_flags, "flags", "vary random numbers along selected dimensions (default: all)"), OPT_SET('d', &attach, "(Wait for debugger)"), }; int next_arg = options(argcp, *argvp, "", help_str, ARRAY_SIZE(opts), opts, ARRAY_SIZE(args), args, true); if (version) debug_printf(DP_INFO, "%s\n", bart_version); *argcp -= next_arg; *argvp += next_arg; if (attach) { fprintf(stderr, "PID: %d", getpid()); raise(SIGSTOP); } #ifndef _OPENMP if (omp_threads > 1) { debug_printf(DP_WARN, "WARN: Multiple threads requested, but BART compiled without OPENMP support! Ignoring...\n"); omp_threads = 1; } #endif if (0 != flags && 0 != pflags && flags != pflags) error("Inconsistent use of -p and -l!\n"); flags |= pflags; if (1 == omp_threads && 0 != pflags) omp_threads = 0; const char* ompi_str; if (NULL != (ompi_str = getenv("OMPI_COMM_WORLD_SIZE"))) { unsigned long mpi_ranks = strtoul(ompi_str, NULL, 10); if (1 < mpi_ranks) use_mpi = true; } if (use_mpi) init_mpi(argcp, argvp); if (NULL != ref_file) { long ref_dims[DIMS]; const void* tmp = load_async_cfl(ref_file, DIMS, ref_dims); stream_t s = stream_lookup(tmp); if (NULL == s) { // normal reference file: unmap_cfl(DIMS, ref_dims, tmp); } else { // reference stream: // - input is a pipe so don't close. // - flags are determined by stream dims. if ((0 != flags) || (0 != pflags)) error("--ref-file is a stream, this is currently incompatible with --loop and --parallel-loop options!\n"); flags = stream_get_flags(s); } assert(-1 == param_end[0]); for (int i = 0, ip = 0; i < DIMS; i++) if (MD_IS_SET(flags, i)) param_end[ip++] = ref_dims[i]; } opt_free_strdup(); int nstart = 0; int nend = 0; for(; nstart < DIMS && -1 != param_start[nstart]; nstart++); for(; nend < DIMS && -1 != param_end[nend]; nend++); if (0 != nstart && bitcount(flags) != nstart) error("Size of start values does not coincide with number of selected flags!\n"); if (0 != nend && bitcount(flags) != nend) error("Size of start values does not coincide with number of selected flags!\n"); if (0 == nstart) for (int i = 0; i < bitcount(flags); i++) param_start[i] = 0; if (0 == nend) for (int i = 0; i < bitcount(flags); i++) param_end[i] = param_start[i] + 1; long offs_size[DIMS] = { [0 ... DIMS - 1] = 0 }; long loop_dims[DIMS] = { [0 ... 
DIMS - 1] = 1 }; for (int i = 0, j = 0; i < DIMS; ++i) { if (MD_IS_SET(flags, i)) { offs_size[i] = param_start[j]; loop_dims[i] = param_end[j] - param_start[j]; j++; } } #ifdef _OPENMP if (0 == omp_threads) { if (NULL == getenv("OMP_NUM_THREADS")) omp_set_num_threads(omp_get_num_procs()); omp_threads = omp_get_max_threads(); } #endif omp_threads = MAX(omp_threads, 1); omp_threads = MIN(omp_threads, md_calc_size(DIMS, loop_dims)); if (1 < mpi_get_num_procs()) omp_threads = 1; init_cfl_loop_desc(DIMS, loop_dims, offs_size, flags, omp_threads, 0); } static int batch_wrapper(main_fun_t* dispatch_func, int argc, char *argv[argc], long pos) { char* thread_argv[argc + 1]; char* thread_argv_save[argc]; for (int m = 0; m < argc; m++) { thread_argv[m] = strdup(argv[m]); thread_argv_save[m] = thread_argv[m]; } thread_argv[argc] = NULL; set_cfl_loop_index(pos); num_rand_init(0ULL); int ret = (*dispatch_func)(argc, thread_argv); io_memory_cleanup(); for (int m = 0; m < argc; ++m) free(thread_argv_save[m]); return ret; } int main_bart(int argc, char* argv[argc]) { #ifdef __EMSCRIPTEN__ wasm_fd_offset = 0; #endif char* bn = basename(argv[0]); // only skip over initial bart or bart.exe. calling "bart bart" is an error. if (0 == strcmp(bn, "bart") || 0 == strcmp(bn, "bart.exe")) { if (1 == argc) { usage(); return -1; } // This advances argv to behind the bart options parse_bart_opts(&argc, &argv); bn = basename(argv[0]); } main_fun_t* dispatch_func = NULL; for (int i = 0; NULL != dispatch_table[i].name; i++) if (0 == strcmp(bn, dispatch_table[i].name)) dispatch_func = dispatch_table[i].main_fun; bool builtin_found = (NULL != dispatch_func); if (builtin_found) { debug_printf(DP_DEBUG3, "Builtin found: %s\n", bn); unsigned int v[5]; version_parse(v, bart_version); if (0 != v[4]) debug_printf(DP_WARN, "BART version %s is not reproducible.\n", bart_version); int final_ret = 0; if (cfl_loop_omp()) { // gomp does only use a thread pool for non-nested parallelism! // Threads are spawned dynamically with a performance penalty for md_functions, // if we have an outer parallel region even if it is inactive. #ifdef USE_CUDA cuda_set_stream_level(); #endif #pragma omp parallel num_threads(cfl_loop_num_workers()) { long start = cfl_loop_worker_id(); long total = cfl_loop_desc_total(); long workers = cfl_loop_num_workers(); for (long i = start; ((i < total) && (0 == final_ret)); i += workers) { int ret = batch_wrapper(dispatch_func, argc, argv, i); if (0 != ret) { #pragma omp critical (main_end_condition) final_ret = ret; bart_exit(ret, "Tool exited with error"); } } } } else { long start = cfl_loop_worker_id(); long total = cfl_loop_desc_total(); long workers = cfl_loop_num_workers(); mpi_signoff_proc(cfl_loop_desc_active() && (mpi_get_rank() >= total)); for (long i = start; ((i < total) && (0 == final_ret)); i += workers) { int ret = batch_wrapper(dispatch_func, argc, argv, i); int tag = ((((i + workers) < total) || (0 != ret)) ? 
1 : 0); mpi_signoff_proc(cfl_loop_desc_active() && (0 == tag)); if (0 != ret) { final_ret = ret; bart_exit(ret, "Tool exited with error"); } } } deinit_mpi(); bart_exit_cleanup(); return final_ret; } else { // could not find any builtin // try to find something in commands debug_printf(DP_DEBUG3, "No builtin found: %s\n", argv[0]); #ifdef CHECK_EXE_COMMANDS // also check dirname(PATH_TO_BART)/commands/: char exe_loc[1024] = {0}; ssize_t exe_loc_size = ARRAY_SIZE(exe_loc); ssize_t rl = readlink("/proc/self/exe", exe_loc, (size_t)exe_loc_size); char* exe_dir = NULL; if ((-1 != rl) && (exe_loc_size != rl)) { // readlink returned without error and did not truncate exe_dir = dirname(exe_loc); // no need to check for NULL, as in that case, we skip it in the loop below } #endif const char* tpath[] = { #ifdef CHECK_EXE_COMMANDS exe_dir, #endif getenv("BART_TOOLBOX_PATH"), getenv("TOOLBOX_PATH"), // support old environment variable "/usr/local/lib/bart/", "/usr/lib/bart/", }; for (int i = 0; i < (int)ARRAY_SIZE(tpath); i++) { if (NULL == tpath[i]) continue; size_t len = strlen(tpath[i]) + strlen(bn) + 10 + 1; // extra space for /commands/ and null-terminator char (*cmd)[len] = xmalloc(sizeof *cmd); int r = snprintf(*cmd, len, "%s/commands/%s", tpath[i], bn); if (r >= (int)len) { error("Commandline too long\n"); return bart_exit(1, NULL); // not really needed, error calls abort() } debug_printf(DP_DEBUG3, "Trying: %s\n", cmd); if (-1 == execv(*cmd, argv)) { if (ENOENT != errno) { error("Executing bart command failed\n"); return bart_exit(1, NULL); // not really needed, error calls abort() } } else { assert(0); // unreachable } xfree(cmd); } fprintf(stderr, "Unknown bart command: \"%s\".\n", bn); return bart_exit(-1, NULL); } } int bart_command(int len, char* buf, int argc, char* argv[]) { int save = debug_level; if (NULL != buf) { buf[0] = '\0'; bart_output = fmemopen(buf, (size_t)len, "w"); } int ret = error_catcher(main_bart, argc, argv); bart_exit_cleanup(); debug_level = save; if (NULL != bart_output) { #ifdef _WIN32 rewind(bart_output); fread(buf, 1, len, bart_output); #endif fclose(bart_output); // write final nul bart_output = NULL; } return ret; } libbart-devel/src/bart_embed_api.h000066400000000000000000000117041472525725500174700ustar00rootroot00000000000000#ifndef BART_API_H_INCLUDED #define BART_API_H_INCLUDED #ifdef __cplusplus extern "C" { #endif //! BART's current debug level extern int debug_level; //! Load the content of some in-memory CFL /*! * This function will load the data from some named in-memory CFL and returns * its data. * The dimensions array will get modified to match those from the CFL * * \param name Name used to refer to in-memory CFL * \param D Size of the dimensions array (should be < 16) * \param dimensions Array holding the dimensions of the data * (will get modified) * * \return Pointer to the data or NULL if no matching in-memory CFL file * was found */ void* load_mem_cfl(const char* name, unsigned int D, long dimensions[]); //! Register some memory into the list of in-memory CFL files /*! * This function handles data that was allocated using the C malloc(...) function. * It takes *ownership* of the data and will free it using free(...) 
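 *
 * Minimal usage sketch (the name "img.mem", the dimensions and the fill
 * step are illustrative assumptions, not mandated by the API):
 * \code
 * long dims[2] = { 128, 128 };
 * _Complex float* data = malloc(128 * 128 * sizeof(_Complex float));
 * // ... fill data ...
 * register_mem_cfl_malloc("img.mem", 2, dims, data); // ownership passes to BART
 * \endcode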
* * \param name Name which will be used to refer to the created in-mem CFL * \param D Size of the dimensions array (should be < 16) * \param dimensions Array holding the dimensions of the data * \param ptr Pointer to the data * * \note The underlying data type of ptr is assumed to be complex floats * (complex float or _Complex float) * * \warning Calling this function on data allocated with new[] will result * in undefined behaviour! * * \warning Be aware that if MEMONLY_CFL is not defined, names that do not * end with the '.mem' extension will be unreachable by user code */ void register_mem_cfl_malloc(const char* name, unsigned int D, const long dimensions[], void* ptr); //! Register some memory into the list of in-memory CFL files /*! * This function handles data that was allocated using the C++ new[] operator * It takes *ownership* of the data and will free it using delete[] * * \param name Name which will be used to refer to the created in-mem CFL * \param D Size of the dimensions array (should be < 16) * \param dimensions Array holding the dimensions of the data * \param ptr Pointer to the data * * \note The underlying data type of ptr is assumed to be complex floats * (complex float or _Complex float) * * \warning Calling this function on data allocated with malloc will * result in undefined behaviour! * * \warning Be aware that if MEMONLY_CFL is not defined, names that do not * end with the '.mem' extension will be unreachable by user code */ void register_mem_cfl_new(const char* name, unsigned int D, const long dimensions[], void* ptr); //! Register some memory into the list of in-memory CFL files /*! * This function handles data that was allocated by the user and of which * the user wishes to retain control of its lifetime. * It does *not* takes ownership of the data * * \param name Name which will be used to refer to the created in-mem CFL * \param D Size of the dimensions array (should be < 16) * \param dimensions Array holding the dimensions of the data * \param ptr Pointer to the data * * \note The underlying data type of ptr is assumed to be complex floats * (complex float or _Complex float) * * \warning Be aware that if MEMONLY_CFL is not defined, names that do not * end with the '.mem' extension will be unreachable by user code */ void register_mem_cfl_non_managed(const char* name, unsigned int D, const long dims[], void* ptr); //! BART's main function /*! * This function will execute the BART command specified in argv[0] * * If applicable, the output of the BART command will be returned into * out. This applies to: * - bitmask * - estdims * - estvar * - nrmse * - sdot * - show * - version * * If out is not NULL, outputs of the above commands are redirected to out * * \param len Size of the out buffer * \param out Should be either NULL or point to a valid array of characters * \param argc Same as for the main function * \param argv Same as for the main function * * \warning Be aware that if MEMONLY_CFL is not defined, names that do not * end with the '.mem' extension will be unreachable by user code */ int bart_command(int len, char* out, int argc, char* argv[]); //! Deallocate any memory CFLs /*! * \note It is safe to call this function multiple times. 
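 *
 * Teardown sketch (hedged example; "version" is one of the commands whose
 * output is captured into the buffer, as documented for bart_command above):
 * \code
 * char out[256];
 * char* args[] = { "version" };
 * bart_command(256, out, 1, args);
 * deallocate_all_mem_cfl(); // afterwards, all in-memory CFLs are gone
 * \endcode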
*/ void deallocate_all_mem_cfl(); #ifdef __cplusplus } #endif #endif //BART_API_H_INCLUDED libbart-devel/src/bbox.c000066400000000000000000000000601472525725500154710ustar00rootroot00000000000000 #define main_bart main_bbox #include "bart.c" libbart-devel/src/bench.c000066400000000000000000000367631472525725500156410ustar00rootroot00000000000000/* Copyright 2014. The Regents of the University of California. * Copyright 2015-2018. Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2014-2018 Martin Uecker * 2014 Jonathan Tamir */ #include #include #include #include #include #include #include "num/multind.h" #include "num/flpmath.h" #include "num/rand.h" #include "num/init.h" #include "num/ops_p.h" #include "num/mdfft.h" #include "num/fft.h" #include "num/ode.h" #include "num/filter.h" #include "wavelet/wavthresh.h" #include "misc/debug.h" #include "misc/misc.h" #include "misc/mmio.h" #include "misc/opts.h" #define DIMS 8 static double bench_generic_copy(long dims[DIMS]) { long strs[DIMS]; md_calc_strides(DIMS, strs, dims, CFL_SIZE); md_calc_strides(DIMS, strs, dims, CFL_SIZE); complex float* x = md_alloc(DIMS, dims, CFL_SIZE); complex float* y = md_alloc(DIMS, dims, CFL_SIZE); md_gaussian_rand(DIMS, dims, x); double tic = timestamp(); md_copy2(DIMS, dims, strs, y, strs, x, CFL_SIZE); double toc = timestamp(); md_free(x); md_free(y); return toc - tic; } static double bench_generic_matrix_multiply(long dims[DIMS]) { long dimsX[DIMS]; long dimsY[DIMS]; long dimsZ[DIMS]; #if 1 md_select_dims(DIMS, 2 * 3 + 17, dimsX, dims); // 1 110 1 md_select_dims(DIMS, 2 * 6 + 17, dimsY, dims); // 1 011 1 md_select_dims(DIMS, 2 * 5 + 17, dimsZ, dims); // 1 101 1 #else md_select_dims(DIMS, 2 * 5 + 17, dimsZ, dims); // 1 101 1 md_select_dims(DIMS, 2 * 3 + 17, dimsY, dims); // 1 110 1 md_select_dims(DIMS, 2 * 6 + 17, dimsX, dims); // 1 011 1 #endif complex float* x = md_alloc(DIMS, dimsX, CFL_SIZE); complex float* y = md_alloc(DIMS, dimsY, CFL_SIZE); complex float* z = md_alloc(DIMS, dimsZ, CFL_SIZE); md_gaussian_rand(DIMS, dimsX, x); md_gaussian_rand(DIMS, dimsY, y); double tic = timestamp(); md_ztenmul(DIMS, dimsZ, z, dimsX, x, dimsY, y); double toc = timestamp(); md_free(x); md_free(y); md_free(z); return toc - tic; } static double bench_generic_add(long dims[DIMS], unsigned long flags, bool forloop) { long dimsX[DIMS]; long dimsY[DIMS]; long dimsC[DIMS]; md_select_dims(DIMS, flags, dimsX, dims); md_select_dims(DIMS, ~flags, dimsC, dims); md_select_dims(DIMS, ~0UL, dimsY, dims); long strsX[DIMS]; long strsY[DIMS]; md_calc_strides(DIMS, strsX, dimsX, CFL_SIZE); md_calc_strides(DIMS, strsY, dimsY, CFL_SIZE); complex float* x = md_alloc(DIMS, dimsX, CFL_SIZE); complex float* y = md_alloc(DIMS, dimsY, CFL_SIZE); md_gaussian_rand(DIMS, dimsX, x); md_gaussian_rand(DIMS, dimsY, y); long L = md_calc_size(DIMS, dimsC); long T = md_calc_size(DIMS, dimsX); double tic = timestamp(); if (forloop) { for (long i = 0; i < L; i++) { for (long j = 0; j < T; j++) y[i + j * L] += x[j]; } } else { md_zaxpy2(DIMS, dims, strsY, y, 1., strsX, x); } double toc = timestamp(); md_free(x); md_free(y); return toc - tic; } static double bench_generic_sum(long dims[DIMS], unsigned long flags, bool forloop) { long dimsX[DIMS]; long dimsY[DIMS]; long dimsC[DIMS]; md_select_dims(DIMS, ~0UL, dimsX, dims); md_select_dims(DIMS, flags, dimsY, dims); md_select_dims(DIMS, ~flags, dimsC, dims); long strsX[DIMS]; long strsY[DIMS]; 
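	/* md_calc_strides() assigns stride 0 to singleton dimensions, so below
	 * y is addressed with zero stride along the dimensions that were dropped
	 * from dimsY; the single strided md_zaxpy2() call then accumulates (sums)
	 * x into y over exactly those dimensions. */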
md_calc_strides(DIMS, strsX, dimsX, CFL_SIZE); md_calc_strides(DIMS, strsY, dimsY, CFL_SIZE); complex float* x = md_alloc(DIMS, dimsX, CFL_SIZE); complex float* y = md_alloc(DIMS, dimsY, CFL_SIZE); md_gaussian_rand(DIMS, dimsX, x); md_clear(DIMS, dimsY, y, CFL_SIZE); long L = md_calc_size(DIMS, dimsC); long T = md_calc_size(DIMS, dimsY); double tic = timestamp(); if (forloop) { for (long i = 0; i < L; i++) { for (long j = 0; j < T; j++) y[j] = y[j] + x[i + j * L]; } } else { md_zaxpy2(DIMS, dims, strsY, y, 1., strsX, x); } double toc = timestamp(); md_free(x); md_free(y); return toc - tic; } static double bench_copy1(long scale) { long dims[DIMS] = { 1, 128 * scale, 128 * scale, 1, 1, 16, 1, 16 }; return bench_generic_copy(dims); } static double bench_copy2(long scale) { long dims[DIMS] = { 262144 * scale, 16, 1, 1, 1, 1, 1, 1 }; return bench_generic_copy(dims); } static double bench_matrix_mult(long scale) { long dims[DIMS] = { 1, 256 * scale, 256 * scale, 256 * scale, 1, 1, 1, 1 }; return bench_generic_matrix_multiply(dims); } static double bench_batch_matmul1(long scale) { long dims[DIMS] = { 30000 * scale, 8, 8, 8, 1, 1, 1, 1 }; return bench_generic_matrix_multiply(dims); } static double bench_batch_matmul2(long scale) { long dims[DIMS] = { 1, 8, 8, 8, 30000 * scale, 1, 1, 1 }; return bench_generic_matrix_multiply(dims); } static double bench_tall_matmul1(long scale) { long dims[DIMS] = { 1, 8, 8, 100000 * scale, 1, 1, 1, 1 }; return bench_generic_matrix_multiply(dims); } static double bench_tall_matmul2(long scale) { long dims[DIMS] = { 1, 100000 * scale, 8, 8, 1, 1, 1, 1 }; return bench_generic_matrix_multiply(dims); } static double bench_add(long scale) { long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 }; return bench_generic_add(dims, MD_BIT(2), false); } static double bench_addf(long scale) { long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 }; return bench_generic_add(dims, MD_BIT(2), true); } static double bench_add2(long scale) { long dims[DIMS] = { 50 * scale, 1, 65536 * scale, 1, 1, 1, 1, 1 }; return bench_generic_add(dims, MD_BIT(0), false); } static double bench_sum2(long scale) { long dims[DIMS] = { 50 * scale, 1, 65536 * scale, 1, 1, 1, 1, 1 }; return bench_generic_sum(dims, MD_BIT(0), false); } static double bench_sum(long scale) { long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 }; return bench_generic_sum(dims, MD_BIT(2), false); } static double bench_sumf(long scale) { long dims[DIMS] = { 65536 * scale, 1, 50 * scale, 1, 1, 1, 1, 1 }; return bench_generic_sum(dims, MD_BIT(2), true); } static double bench_zmul(long scale) { long dimsx[DIMS] = { 256, 256, 1, 1, 90 * scale, 1, 1, 1 }; long dimsy[DIMS] = { 256, 256, 1, 1, 1, 1, 1, 1 }; long dimsz[DIMS] = { 1, 1, 1, 1, 90 * scale, 1, 1, 1 }; complex float* x = md_alloc(DIMS, dimsx, CFL_SIZE); complex float* y = md_alloc(DIMS, dimsy, CFL_SIZE); complex float* z = md_alloc(DIMS, dimsz, CFL_SIZE); md_gaussian_rand(DIMS, dimsy, y); md_gaussian_rand(DIMS, dimsz, z); long strsx[DIMS]; long strsy[DIMS]; long strsz[DIMS]; md_calc_strides(DIMS, strsx, dimsx, CFL_SIZE); md_calc_strides(DIMS, strsy, dimsy, CFL_SIZE); md_calc_strides(DIMS, strsz, dimsz, CFL_SIZE); double tic = timestamp(); md_zmul2(DIMS, dimsx, strsx, x, strsy, y, strsz, z); double toc = timestamp(); md_free(x); md_free(y); md_free(z); return toc - tic; } static double bench_transpose(long scale) { long dims[DIMS] = { 2000 * scale, 2000 * scale, 1, 1, 1, 1, 1, 1 }; complex float* x = md_alloc(DIMS, dims, CFL_SIZE); complex 
float* y = md_alloc(DIMS, dims, CFL_SIZE); md_gaussian_rand(DIMS, dims, x); md_clear(DIMS, dims, y, CFL_SIZE); double tic = timestamp(); md_transpose(DIMS, 0, 1, dims, y, dims, x, CFL_SIZE); double toc = timestamp(); md_free(x); md_free(y); return toc - tic; } static double bench_resize(long scale) { long dimsX[DIMS] = { 2000 * scale, 1000 * scale, 1, 1, 1, 1, 1, 1 }; long dimsY[DIMS] = { 1000 * scale, 2000 * scale, 1, 1, 1, 1, 1, 1 }; complex float* x = md_alloc(DIMS, dimsX, CFL_SIZE); complex float* y = md_alloc(DIMS, dimsY, CFL_SIZE); md_gaussian_rand(DIMS, dimsX, x); md_clear(DIMS, dimsY, y, CFL_SIZE); double tic = timestamp(); md_resize(DIMS, dimsY, y, dimsX, x, CFL_SIZE); double toc = timestamp(); md_free(x); md_free(y); return toc - tic; } static double bench_norm(int s, long scale) { long dims[DIMS] = { 256 * scale, 256 * scale, 1, 16, 1, 1, 1, 1 }; #if 0 complex float* x = md_alloc_gpu(DIMS, dims, CFL_SIZE); complex float* y = md_alloc_gpu(DIMS, dims, CFL_SIZE); #else complex float* x = md_alloc(DIMS, dims, CFL_SIZE); complex float* y = md_alloc(DIMS, dims, CFL_SIZE); #endif md_gaussian_rand(DIMS, dims, x); md_gaussian_rand(DIMS, dims, y); double tic = timestamp(); switch (s) { case 0: md_zscalar(DIMS, dims, x, y); break; case 1: md_zscalar_real(DIMS, dims, x, y); break; case 2: md_znorm(DIMS, dims, x); break; case 3: md_z1norm(DIMS, dims, x); break; } double toc = timestamp(); md_free(x); md_free(y); return toc - tic; } static double bench_zscalar(long scale) { return bench_norm(0, scale); } static double bench_zscalar_real(long scale) { return bench_norm(1, scale); } static double bench_znorm(long scale) { return bench_norm(2, scale); } static double bench_zl1norm(long scale) { return bench_norm(3, scale); } static double bench_wavelet(long scale) { long dims[DIMS] = { 1, 256 * scale, 256 * scale, 1, 16, 1, 1, 1 }; long minsize[DIMS] = { [0 ... 
DIMS - 1] = 1 }; minsize[0] = MIN(dims[0], 16); minsize[1] = MIN(dims[1], 16); minsize[2] = MIN(dims[2], 16); const struct operator_p_s* p = prox_wavelet_thresh_create(DIMS, dims, 6, 0u, WAVELET_DAU2, minsize, 1.1, true); complex float* x = md_alloc(DIMS, dims, CFL_SIZE); md_gaussian_rand(DIMS, dims, x); double tic = timestamp(); operator_p_apply(p, 0.98, DIMS, dims, x, DIMS, dims, x); double toc = timestamp(); md_free(x); operator_p_free(p); return toc - tic; } static double bench_generic_mdfft(long dims[DIMS], unsigned long flags) { complex float* x = md_alloc(DIMS, dims, CFL_SIZE); complex float* y = md_alloc(DIMS, dims, CFL_SIZE); md_gaussian_rand(DIMS, dims, x); double tic = timestamp(); md_fft(DIMS, dims, flags, 0u, y, x); double toc = timestamp(); md_free(x); md_free(y); return toc - tic; } static double bench_mdfft(long scale) { long dims[DIMS] = { 1, 128 * scale, 128 * scale, 1, 1, 4, 1, 4 }; return bench_generic_mdfft(dims, 6ul); } static double bench_generic_fft(long dims[DIMS], unsigned long flags) { complex float* x = md_alloc(DIMS, dims, CFL_SIZE); complex float* y = md_alloc(DIMS, dims, CFL_SIZE); md_gaussian_rand(DIMS, dims, x); double tic = timestamp(); fft(DIMS, dims, flags, y, x); double toc = timestamp(); md_free(x); md_free(y); return toc - tic; } static double bench_fft(long scale) { long dims[DIMS] = { 1, 256 * scale, 256 * scale, 1, 1, 16, 1, 8 }; return bench_generic_fft(dims, 6ul); } static double bench_generic_fftmod(long dims[DIMS], unsigned long flags) { complex float* x = md_alloc(DIMS, dims, CFL_SIZE); complex float* y = md_alloc(DIMS, dims, CFL_SIZE); md_gaussian_rand(DIMS, dims, x); double tic = timestamp(); fftmod(DIMS, dims, flags, y, x); double toc = timestamp(); md_free(x); md_free(y); return toc - tic; } static double bench_fftmod(long scale) { long dims[DIMS] = { 1, 256 * scale, 256 * scale, 1, 1, 16, 1, 16 }; return bench_generic_fftmod(dims, 6ul); } enum bench_typ { BENCH_ZFILL, BENCH_ZSMUL, BENCH_LINPHASE }; static double bench_generic_expand(enum bench_typ typ, long scale) { long dims[DIMS] = { 1, 256 * scale, 256 * scale, 1, 1, 16, 1, 16 }; float linphase_pos[DIMS] = { 0.5, 0.1 }; complex float* x = md_alloc(DIMS, dims, CFL_SIZE); double tic = timestamp(); switch (typ) { case BENCH_ZFILL: md_zfill(DIMS, dims, x, 1.); break; case BENCH_ZSMUL: md_zsmul(DIMS, dims, x, x, 1.); break; case BENCH_LINPHASE: linear_phase(DIMS, dims, linphase_pos, x); break; default: assert(0); } double toc = timestamp(); md_free(x); return toc - tic; } static double bench_zfill(long scale) { return bench_generic_expand(BENCH_ZFILL, scale); } static double bench_zsmul(long scale) { return bench_generic_expand(BENCH_ZSMUL, scale); } static double bench_linphase(long scale) { return bench_generic_expand(BENCH_LINPHASE, scale); } static double bench_ode(long scale) { float mat[2][2] = { { 0., +1. }, { -1., 0. } }; float x[2] = { 1., 0. }; float h = 10.; float tol = 1.E-6; double tic = timestamp(); ode_matrix_interval(h, tol, 2, x, 0., scale * 10001. * M_PI, mat); double err = pow(fabs(x[0] + 1.), 2.) 
static double bench_ode(long scale)
{
	float mat[2][2] = { { 0., +1. }, { -1., 0. } };
	float x[2] = { 1., 0. };
	float h = 10.;
	float tol = 1.E-6;

	double tic = timestamp();

	ode_matrix_interval(h, tol, 2, x, 0., scale * 10001. * M_PI, mat);

	double err = pow(fabs(x[0] + 1.), 2.) + pow(fabs(x[1] - 0.), 2.);
	assert(err < 1.E-2);

	double toc = timestamp();

	return toc - tic;
}

enum bench_indices { REPETITION_IND, SCALE_IND, THREADS_IND, TESTS_IND, BENCH_DIMS };

typedef double (*bench_fun)(long scale);

static void do_test(const long dims[BENCH_DIMS], complex float* out, long scale, bench_fun fun, const char* str)
{
	printf("%30.30s |", str);

	int N = (int)dims[REPETITION_IND];
	double sum = 0.;
	double min = 1.E10;
	double max = 0.;

	for (int i = 0; i < N; i++) {

		double dt = fun(scale);

		sum += dt;
		min = MIN(dt, min);
		max = MAX(dt, max);

		printf(" %3.4f", (float)dt);
		fflush(stdout);

		assert(0 == REPETITION_IND);
		out[i] = dt;
	}

	printf(" | Avg: %3.4f Max: %3.4f Min: %3.4f\n", (float)(sum / N), max, min);
}

const struct benchmark_s {

	bench_fun fun;
	const char* str;

} benchmarks[] = {
	{ bench_add,		"add (md_zaxpy)" },
	{ bench_add2,		"add (md_zaxpy), contiguous" },
	{ bench_addf,		"add (for loop)" },
	{ bench_sum,		"sum (md_zaxpy)" },
	{ bench_sum2,		"sum (md_zaxpy), contiguous" },
	{ bench_sumf,		"sum (for loop)" },
	{ bench_zmul,		"complex mult. (md_zmul2)" },
	{ bench_transpose,	"complex transpose" },
	{ bench_resize,		"complex resize" },
	{ bench_matrix_mult,	"complex matrix multiply" },
	{ bench_batch_matmul1,	"batch matrix multiply 1" },
	{ bench_batch_matmul2,	"batch matrix multiply 2" },
	{ bench_tall_matmul1,	"tall matrix multiply 1" },
	{ bench_tall_matmul2,	"tall matrix multiply 2" },
	{ bench_zscalar,	"complex dot product" },
	{ bench_zscalar_real,	"real complex dot product" },
	{ bench_znorm,		"l2 norm" },
	{ bench_zl1norm,	"l1 norm" },
	{ bench_copy1,		"copy 1" },
	{ bench_copy2,		"copy 2" },
	{ bench_zfill,		"complex fill" },
	{ bench_zsmul,		"complex scalar multiplication" },
	{ bench_linphase,	"linear phase" },
	{ bench_wavelet,	"wavelet soft thresh" },
	{ bench_mdfft,		"(MD-)FFT" },
	{ bench_fft,		"FFT" },
	{ bench_fftmod,		"fftmod" },
	{ bench_ode,		"ODE" },
};

static const char help_str[] = "Performs a series of micro-benchmarks.";

int main_bench(int argc, char* argv[argc])
{
	const char* out_file = NULL;

	struct arg_s args[] = {
		ARG_OUTFILE(false, &out_file, "output"),
	};

	bool threads = false;
	bool scaling = false;
	unsigned long flags = ~0UL;

	const struct opt_s opts[] = {
		OPT_SET('T', &threads, "varying number of threads"),
		OPT_SET('S', &scaling, "varying problem size"),
		OPT_ULONG('s', &flags, "flags", "select benchmarks"),
	};

	cmdline(&argc, argv, ARRAY_SIZE(args), args, help_str, ARRAY_SIZE(opts), opts);

	long dims[BENCH_DIMS] = MD_INIT_ARRAY(BENCH_DIMS, 1);
	long strs[BENCH_DIMS];
	long pos[BENCH_DIMS] = { 0 };

	dims[REPETITION_IND] = 5;
	dims[THREADS_IND] = threads ? 8 : 1;
	dims[SCALE_IND] = scaling ? 5 : 1;
	dims[TESTS_IND] = sizeof(benchmarks) / sizeof(benchmarks[0]);

	md_calc_strides(BENCH_DIMS, strs, dims, CFL_SIZE);

	bool outp = (NULL != out_file);
	complex float* out = (outp ? create_cfl : anon_cfl)(out_file, BENCH_DIMS, dims);

	num_init();

	md_clear(BENCH_DIMS, dims, out, CFL_SIZE);

	do {
		if (!(flags & MD_BIT(pos[TESTS_IND])))
			continue;

		if (threads) {

			num_set_num_threads((int)pos[THREADS_IND] + 1);
			debug_printf(DP_INFO, "%02d threads. ", pos[THREADS_IND] + 1);
		}

		do_test(dims, &MD_ACCESS(BENCH_DIMS, strs, pos, out), pos[SCALE_IND] + 1, benchmarks[pos[TESTS_IND]].fun, benchmarks[pos[TESTS_IND]].str);

	} while (md_next(BENCH_DIMS, dims, ~MD_BIT(REPETITION_IND), pos));

	unmap_cfl(BENCH_DIMS, dims, out);

	return 0;
}
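/*
 * Editor's note: usage sketch derived from the options registered in
 * main_bench() above (output file name is illustrative):
 *
 *	bart bench			# run all benchmarks, 5 repetitions each
 *	bart bench -T			# additionally sweep 1..8 threads
 *	bart bench -S			# additionally sweep problem scales 1..5
 *	bart bench -s 6 out.cfl		# bitmask: run only benchmarks 1 and 2,
 *					# saving all timings to the output file
 */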
libbart-devel/src/bin.c000066400000000000000000000244271472525725500153240ustar00rootroot00000000000000/* Copyright 2020. Uecker Lab. University Medical Center Göttingen.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2018-2020 Sebastian Rosenzweig
 * 2020 Martin Uecker
 */

/* Editor's note: the names of the four system headers below were lost in
 * extraction (angle brackets stripped); these are plausible reconstructions. */
#include <stdbool.h>
#include <complex.h>
#include <math.h>
#include <stdio.h>

#include "num/multind.h"
#include "num/init.h"
#include "num/flpmath.h"
#include "num/filter.h"

#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/mri.h"
#include "misc/opts.h"
#include "misc/debug.h"

#include "calib/bin.h"

/* Reorder binning: [-o]
 * --------------------
 *
 * Input a 1D file with labels at the dimension that
 * you want to reorder according to the label order.
 *
 *
 * Label binning: [-l long]
 * ------------------------
 *
 * Bin a dimension according to the label-file.
 * The label file must be 1D and the dimension of the