diff --git a/.github/workflows/toolchain_quick.yaml b/.github/workflows/toolchain_quick.yaml index 79cd3c8ab19..3c2b58a8106 100644 --- a/.github/workflows/toolchain_quick.yaml +++ b/.github/workflows/toolchain_quick.yaml @@ -3,13 +3,13 @@ on: pull_request: paths: - toolchain/** - - .github/workflows/toolchain_quick.yml + - .github/workflows/toolchain_quick.yaml push: branches: - develop paths: - toolchain/** - - .github/workflows/toolchain_quick.yml + - .github/workflows/toolchain_quick.yaml workflow_dispatch: jobs: lint-and-sanity: @@ -43,7 +43,7 @@ jobs: shellcheck --version find toolchain -type f -name '*.sh' -print0 | xargs -0 -n1 shellcheck -x || true - dry-run-matrix: + pack-run-matrix: runs-on: ubuntu-latest strategy: fail-fast: false @@ -56,27 +56,26 @@ jobs: run: | sudo apt-get update sudo apt-get install -y dos2unix - - name: Dry run + - name: Pack run run: | set -euo pipefail cd toolchain rm -rf build install find . -type f -name '*.sh' -exec chmod +x {} + if [ "${{ matrix.variant }}" = "gnu" ]; then - ./toolchain_gnu.sh --dry-run --skip-system-checks + ./toolchain_gnu.sh --pack-run --skip-system-checks elif [ "${{ matrix.variant }}" = "intel" ]; then - ./toolchain_intel.sh --dry-run --skip-system-checks + ./toolchain_intel.sh --pack-run --skip-system-checks elif [ "${{ matrix.variant }}" = "cuda" ]; then - ./toolchain_gnu.sh --dry-run --skip-system-checks --enable-cuda --gpu-ver 70 + ./toolchain_gnu.sh --pack-run --skip-system-checks --enable-cuda --gpu-ver 70 fi - name: Upload setup if: always() uses: actions/upload-artifact@v7 with: - name: toolchain-${{ matrix.variant }}-dryrun + name: toolchain-${{ matrix.variant }}-packrun path: | toolchain/install/setup toolchain/compile.log toolchain/compile.err if-no-files-found: ignore - diff --git a/docs/advanced/install.md b/docs/advanced/install.md index fb237625a26..0df93f602a5 100644 --- a/docs/advanced/install.md +++ b/docs/advanced/install.md @@ -58,7 +58,7 @@ cmake -B build 
-DDeePMD_DIR=/dir_to_deepmd-kit -DTorch_DIR=/dir_to_pytorch ## Build with NEP This interface enables running MD simulations with the NEP model. It requires the [NEP_CPU](https://github.com/brucefan1983/NEP_CPU) library, which can be easily installed using toolchain as shown below: ```bash -./install_abacus_toolchain.sh --with-nep=install +./install_abacus_toolchain_new.sh --with-nep=install ``` To build ABACUS: diff --git a/toolchain/README.md b/toolchain/README.md index e0f1380624c..5f982a039e3 100644 --- a/toolchain/README.md +++ b/toolchain/README.md @@ -1,6 +1,6 @@ # ABACUS Toolchain -[![Version](https://img.shields.io/badge/version-2025.3-blue.svg)](https://github.com/deepmodeling/abacus-develop/tree/develop/toolchain) +[![Version](https://img.shields.io/badge/version-2026.1-blue.svg)](https://github.com/deepmodeling/abacus-develop/tree/develop/toolchain) [![License](https://img.shields.io/badge/license-GPL--compatible-green.svg)](#license) [![Platform](https://img.shields.io/badge/platform-Linux-lightgrey.svg)]() @@ -139,12 +139,23 @@ For air-gapped systems or unreliable internet: # 1. Create build directory and download packages mkdir build # Download required packages to build/ directory with proper naming -# e.g., fftw-3.3.10.tar.gz, openmpi-5.0.8.tar.bz2 +# e.g., fftw-3.3.11.tar.gz, openmpi-5.0.10.tar.bz2 # 2. Run toolchain (will detect local packages) ./toolchain_gnu.sh ``` +The downloading process can be facilitated via `./toolchain_gnu.sh --pack-run`. + +Also, for users in China, we provide a Gitee mirror repository with pre-downloaded packages: +```bash +# Clone the Gitee repository in toolchain directory +git clone https://gitee.com/jamesmisaka/abacus_toolchain_build.git +# Move packages to build directory +mv abacus_toolchain_build/* build/ +# Then run toolchain normally +``` + ### Hybrid Installation Mix online and offline packages as needed - the toolchain automatically detects locally available packages and downloads missing ones. 
@@ -178,20 +189,20 @@ Mix online and offline packages as needed - the toolchain automatically detects | CMake | 3.31.7 / 3.30.5 | Build system | BSD-3-Clause | Install | | GCC | 13.2.0 / 11.4.0 | C/C++ compiler | GPL-3.0-or-later WITH GCC-exception-3.1 | Install | | **MPI Libraries** ||||| -| OpenMPI | 5.0.8 / 4.1.6 | MPI implementation | BSD-3-Clause-Open-MPI | Install | -| MPICH | 4.3.1 / 4.1.0 | Alternative MPI | mpich2 (BSD-like) | Alternative | +| OpenMPI | 5.0.10 / 4.1.8 | MPI implementation | BSD-3-Clause-Open-MPI | Install | +| MPICH | 5.0.1 / 4.3.2 | Alternative MPI | mpich2 (BSD-like) | Alternative | | **Math Libraries** ||||| -| OpenBLAS | 0.3.30 / 0.3.27 | Linear algebra | BSD-3-Clause | Install | -| ScaLAPACK | 2.2.2 / 2.2.1 | Parallel linear algebra | BSD-3-Clause | Install | +| OpenBLAS | 0.3.33 / 0.3.30 | Linear algebra | BSD-3-Clause | Install | +| ScaLAPACK | 2.2.3 / 2.2.1 | Parallel linear algebra | BSD-3-Clause | Install | | **Scientific Libraries** ||||| -| FFTW | 3.3.10 / 3.3.10 | Fast Fourier Transform | GPL-2.0-or-later | Install | +| FFTW | 3.3.11 / 3.3.10 | Fast Fourier Transform | GPL-2.0-or-later | Install | | LibXC | 7.0.0 / 6.2.2 | Exchange-correlation | MPL-2.0 | Install | -| ELPA | 2025.06.001 / 2024.05.001 | Eigenvalue solver | LGPL-3.0-only | Install | +| ELPA | 2026.02.001 / 2024.05.001 | Eigenvalue solver | LGPL-3.0-only | Install | | **Advanced Features** ||||| -| Cereal | master | C++ Serialization | BSD | Install | -| RapidJSON | master | JSON parsing | MIT | Install | -| LibRI | master | EXX calculations | GPL-3.0 | Install | -| LibComm | master | EXX calculations | GPL-3.0 | Install | +| Cereal | pinned commit | C++ Serialization | BSD | Install | +| RapidJSON | pinned commit | JSON parsing | MIT | Install | +| LibRI | pinned commit | EXX calculations | GPL-3.0 | Install | +| LibComm | pinned commit | EXX calculations | GPL-3.0 | Install | | LibTorch | 2.1.2 / 1.12.1 | MLALGO support | BSD-3-Clause | Optional | | LibNPY | 
1.0.1 / 1.0.1 | NumPy I/O | MIT | Optional | | NEP | main | Neuroevolution potential | MIT | Optional | @@ -226,11 +237,11 @@ One can also manually edit the `toolchain_gnu.sh` for selecting specific version # Refer to scripts/package_versions.sh for specific version numbers CMAKE_VERSION="main" # main=3.31.7, alt=3.30.5 -OPENMPI_VERSION="main" # main=5.0.8, alt=4.1.6 -OPENBLAS_VERSION="main" # main=0.3.30, alt=0.3.27 -ELPA_VERSION="main" # main=2025.06.001, alt=2024.05.001 +OPENMPI_VERSION="main" # main=5.0.10, alt=4.1.8 +OPENBLAS_VERSION="main" # main=0.3.33, alt=0.3.30 +ELPA_VERSION="main" # main=2026.02.001, alt=2024.05.001 LIBXC_VERSION="main" # main=7.0.0, alt=6.2.2 -SCALAPACK_VERSION="main" # main=2.2.2, alt=2.2.1 +SCALAPACK_VERSION="main" # main=2.2.3, alt=2.2.1 # Optional Libraries LIBTORCH_VERSION="main" # main=2.1.2, alt=1.12.1 (use alt for older GLIBC) ``` @@ -467,19 +478,6 @@ NPROCS_OVERWRITE=4 ./toolchain_gnu.sh --with-gcc --with-openmpi - **CI/CD environments**: Match container resource limits - **Debugging**: Use single-core compilation for clearer error messages -### Legacy Script Options - -The deprecated `install_abacus_toolchain.sh` supports additional options: - -| Option | Description | Availability | -|--------|-------------|--------------| -| `--dry-run` | Test configuration without installation | ✅ New & Legacy | -| `--pack-run` | Download packages without building | ✅ New & Legacy | -| `--no-check-certificate` | Skip SSL certificate verification | ⚠️ Legacy only (use `DOWNLOAD_CERT_POLICY=skip`) | -| `-j N` | Limit parallel compilation processes | ⚠️ Legacy only (use `NPROCS_OVERWRITE=N`) | - -> **Migration Note**: The new toolchain system (`toolchain_*.sh` scripts) is recommended over the legacy `install_abacus_toolchain.sh`. Legacy options like `--no-check-certificate` and `-j N` are replaced by environment variables `DOWNLOAD_CERT_POLICY` and `NPROCS_OVERWRITE` respectively. 
- ### Environment Management The toolchain generates several setup files: @@ -509,7 +507,6 @@ scripts/ | File | Purpose | |------|---------| | `install_abacus_toolchain_new.sh` | Main orchestration script (new version) | -| `install_abacus_toolchain.sh` | Legacy main script (deprecated) | | `toolchain_*.sh` | Frontend scripts for specific toolchains | | `scripts/lib/config_manager.sh` | Configuration management | | `scripts/lib/package_manager.sh` | Package installation logic | @@ -517,7 +514,7 @@ scripts/ | `scripts/common_vars.sh` | Shared variables and defaults | | `scripts/tool_kit.sh` | Utility functions and macros | | `scripts/parse_if.py` | Parser for IF_XYZ constructs | -| `checksums.sha256` | Pre-calculated SHA256 checksums for packages | +| `install/<package>/install_successful` | Per-package install lock/checksum file generated by `write_checksums` | ### Script Structure Details diff --git a/toolchain/build_abacus_aocc-aocl.sh b/toolchain/build_abacus_aocc-aocl.sh index ac174db0997..63c29b6eb2d 100755 --- a/toolchain/build_abacus_aocc-aocl.sh +++ b/toolchain/build_abacus_aocc-aocl.sh @@ -22,16 +22,15 @@ BUILD_DIR=build_abacus_aocc_aocl rm -rf $BUILD_DIR PREFIX=$ABACUS_DIR -ELPA=$INSTALL_DIR/elpa-2025.06.001/cpu -# ELPA=$INSTALL_DIR/elpa-2025.06.001/nvidia # for elpa-gpu -CEREAL=$INSTALL_DIR/cereal-master/include/cereal -LIBXC=$INSTALL_DIR/libxc-7.0.0 -RAPIDJSON=$INSTALL_DIR/rapidjson-master +ELPA=${ELPA_ROOT} +CEREAL=${CEREAL_ROOT}/include +LIBXC=${LIBXC_ROOT} +RAPIDJSON=${RAPIDJSON_ROOT} LAPACK=$AOCLhome/lib SCALAPACK=$AOCLhome/lib FFTW3=$AOCLhome -LIBRI=$INSTALL_DIR/LibRI-master -LIBCOMM=$INSTALL_DIR/LibComm-master +LIBRI=${LIBRI_ROOT} +LIBCOMM=${LIBCOMM_ROOT} USE_CUDA=OFF # set ON to enable gpu-abacus # NEP_DIR=$INSTALL_DIR/NEP_CPU-main # LIBTORCH=$INSTALL_DIR/libtorch-2.1.2/share/cmake/Torch @@ -83,4 +82,4 @@ cat << EOF Done! To use the installed ABACUS version You need to source ${TOOL}/abacus_env.sh first ! 
-EOF \ No newline at end of file +EOF diff --git a/toolchain/build_abacus_gcc-aocl.sh b/toolchain/build_abacus_gcc-aocl.sh index e2515f609ba..1faf1843d25 100755 --- a/toolchain/build_abacus_gcc-aocl.sh +++ b/toolchain/build_abacus_gcc-aocl.sh @@ -22,16 +22,15 @@ BUILD_DIR=build_abacus_gcc_aocl rm -rf $BUILD_DIR PREFIX=$ABACUS_DIR -ELPA=$INSTALL_DIR/elpa-2025.06.001/cpu -# ELPA=$INSTALL_DIR/elpa-2025.06.001/nvidia # for elpa-gpu -CEREAL=$INSTALL_DIR/cereal-master/include/cereal -LIBXC=$INSTALL_DIR/libxc-7.0.0 -RAPIDJSON=$INSTALL_DIR/rapidjson-master +ELPA=${ELPA_ROOT} +CEREAL=${CEREAL_ROOT}/include +LIBXC=${LIBXC_ROOT} +RAPIDJSON=${RAPIDJSON_ROOT} LAPACK=$AOCLhome/lib SCALAPACK=$AOCLhome/lib FFTW3=$AOCLhome -LIBRI=$INSTALL_DIR/LibRI-master -LIBCOMM=$INSTALL_DIR/LibComm-master +LIBRI=${LIBRI_ROOT} +LIBCOMM=${LIBCOMM_ROOT} USE_CUDA=OFF # set ON to enable gpu-abacus # NEP_DIR=$INSTALL_DIR/NEP_CPU-main # LIBTORCH=$INSTALL_DIR/libtorch-2.1.2/share/cmake/Torch @@ -81,4 +80,4 @@ cat << EOF Done! To use the installed ABACUS version You need to source ${TOOL}/abacus_env.sh first ! 
-EOF \ No newline at end of file +EOF diff --git a/toolchain/build_abacus_gcc-mkl.sh b/toolchain/build_abacus_gcc-mkl.sh index 21f1fa05b4e..e35f2be558f 100755 --- a/toolchain/build_abacus_gcc-mkl.sh +++ b/toolchain/build_abacus_gcc-mkl.sh @@ -22,13 +22,12 @@ BUILD_DIR=build_abacus_gcc_mkl rm -rf $BUILD_DIR PREFIX=$ABACUS_DIR -ELPA=$INSTALL_DIR/elpa-2025.06.001/cpu -# ELPA=$INSTALL_DIR/elpa-2025.06.001/nvidia # for elpa-gpu -CEREAL=$INSTALL_DIR/cereal-master/include/cereal -LIBXC=$INSTALL_DIR/libxc-7.0.0 -RAPIDJSON=$INSTALL_DIR/rapidjson-master -LIBRI=$INSTALL_DIR/LibRI-master -LIBCOMM=$INSTALL_DIR/LibComm-master +ELPA=${ELPA_ROOT} +CEREAL=${CEREAL_ROOT}/include +LIBXC=${LIBXC_ROOT} +RAPIDJSON=${RAPIDJSON_ROOT} +LIBRI=${LIBRI_ROOT} +LIBCOMM=${LIBCOMM_ROOT} USE_CUDA=OFF # set ON to enable gpu-abacus # NEP_DIR=$INSTALL_DIR/NEP_CPU-main # LIBTORCH=$INSTALL_DIR/libtorch-2.1.2/share/cmake/Torch diff --git a/toolchain/build_abacus_gnu.sh b/toolchain/build_abacus_gnu.sh index 0485738c1ed..69cf152c6ca 100755 --- a/toolchain/build_abacus_gnu.sh +++ b/toolchain/build_abacus_gnu.sh @@ -20,16 +20,15 @@ BUILD_DIR=build_abacus_gnu rm -rf $BUILD_DIR PREFIX=$ABACUS_DIR -LAPACK=$INSTALL_DIR/openblas-0.3.30/lib -SCALAPACK=$INSTALL_DIR/scalapack-2.2.2/lib -ELPA=$INSTALL_DIR/elpa-2025.06.001/cpu -# ELPA=$INSTALL_DIR/elpa-2025.06.001/nvidia # for elpa-gpu -FFTW3=$INSTALL_DIR/fftw-3.3.10 -CEREAL=$INSTALL_DIR/cereal-master/include/cereal -LIBXC=$INSTALL_DIR/libxc-7.0.0 -RAPIDJSON=$INSTALL_DIR/rapidjson-master/ -LIBRI=$INSTALL_DIR/LibRI-master -LIBCOMM=$INSTALL_DIR/LibComm-master +LAPACK=${OPENBLAS_ROOT}/lib +SCALAPACK=${SCALAPACK_ROOT}/lib +ELPA=${ELPA_ROOT} +FFTW3=${FFTW_ROOT} +CEREAL=${CEREAL_ROOT}/include +LIBXC=${LIBXC_ROOT} +RAPIDJSON=${RAPIDJSON_ROOT} +LIBRI=${LIBRI_ROOT} +LIBCOMM=${LIBCOMM_ROOT} USE_CUDA=OFF # set ON to enable gpu-abacus # NEP_DIR=$INSTALL_DIR/NEP_CPU-main # LIBTORCH=$INSTALL_DIR/libtorch-2.1.2/share/cmake/Torch diff --git a/toolchain/build_abacus_intel.sh 
b/toolchain/build_abacus_intel.sh index b1ca04f5ab9..d08118935fe 100755 --- a/toolchain/build_abacus_intel.sh +++ b/toolchain/build_abacus_intel.sh @@ -22,13 +22,12 @@ BUILD_DIR=build_abacus_intel rm -rf $BUILD_DIR PREFIX=$ABACUS_DIR -ELPA=$INSTALL_DIR/elpa-2024.05.001/cpu -# ELPA=$INSTALL_DIR/elpa-2024.05.001/nvidia # for gpu-lcao -CEREAL=$INSTALL_DIR/cereal-master/include -LIBXC=$INSTALL_DIR/libxc-7.0.0 -RAPIDJSON=$INSTALL_DIR/rapidjson-master -LIBRI=$INSTALL_DIR/LibRI-master -LIBCOMM=$INSTALL_DIR/LibComm-master +ELPA=${ELPA_ROOT} +CEREAL=${CEREAL_ROOT}/include +LIBXC=${LIBXC_ROOT} +RAPIDJSON=${RAPIDJSON_ROOT} +LIBRI=${LIBRI_ROOT} +LIBCOMM=${LIBCOMM_ROOT} USE_CUDA=OFF # set ON to enable gpu-abacus # NEP_DIR=$INSTALL_DIR/NEP_CPU-main # LIBTORCH=$INSTALL_DIR/libtorch-2.1.2/share/cmake/Torch diff --git a/toolchain/install_abacus_toolchain.sh b/toolchain/install_abacus_toolchain.sh deleted file mode 100755 index 19fe2328aba..00000000000 --- a/toolchain/install_abacus_toolchain.sh +++ /dev/null @@ -1,899 +0,0 @@ -#!/bin/bash -e - -# TODO: Remove this deprecated script in the future. 
-# shellcheck disable=all - -[ "${BASH_SOURCE[0]}" ] && SCRIPT_NAME="${BASH_SOURCE[0]}" || SCRIPT_NAME=$0 -SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_NAME")" && pwd -P)" - -# ------------------------------------------------------------------------ -# Work directories and used files -# ------------------------------------------------------------------------ -export ROOTDIR="${PWD}" -export SCRIPTDIR="${ROOTDIR}/scripts" -export BUILDDIR="${ROOTDIR}/build" -export INSTALLDIR="${ROOTDIR}/install" -export SETUPFILE="${INSTALLDIR}/setup" -export SHA256_CHECKSUM="${SCRIPTDIR}/checksums.sha256" - -# ------------------------------------------------------------------------ -# Make a copy of all options for $SETUPFILE -# ------------------------------------------------------------------------ -TOOLCHAIN_OPTIONS="$@" - -# ------------------------------------------------------------------------ -# DEPRECATED WARNING -# ------------------------------------------------------------------------ -echo "" -echo -e "\033[1;31m╔══════════════════════════════════════════════════════════════════════════════╗\033[0m" -echo -e "\033[1;31m║ [DEPRECATED] ║\033[0m" -echo -e "\033[1;31m║ ║\033[0m" -echo -e "\033[1;33m║ This script (install_abacus_toolchain.sh) will be deprecated soon. 
║\033[0m" -echo -e "\033[1;33m║ ║\033[0m" -echo -e "\033[1;32m║ Please migrate to the refactored version: ║\033[0m" -echo -e "\033[1;32m║ → install_abacus_toolchain_new.sh ║\033[0m" -echo -e "\033[1;32m║ ║\033[0m" -echo -e "\033[1;36m║ Migration Guide: ║\033[0m" -echo -e "\033[1;36m║ • Use toolchain_*.sh frontend scripts for easier configuration ║\033[0m" -echo -e "\033[1;36m║ • New version supports main/alt package version switch ║\033[0m" -echo -e "\033[1;36m║ • Improved parameter handling and error reporting ║\033[0m" -echo -e "\033[1;31m║ ║\033[0m" -echo -e "\033[1;31m╚══════════════════════════════════════════════════════════════════════════════╝\033[0m" -echo "" -echo -e "\033[1;33mContinuing with legacy script in 3 seconds...\033[0m" -sleep 3 -echo "" - -# ------------------------------------------------------------------------ -# Load common variables and tools -# ------------------------------------------------------------------------ -source "${SCRIPTDIR}"/common_vars.sh -source "${SCRIPTDIR}"/tool_kit.sh - -# ------------------------------------------------------------------------ -# Documentation -# ------------------------------------------------------------------------ -show_help() { - cat << EOF -This script will help you compile and install, -or link libraries ABACUS depends on, -and give setup files that you can use to compile ABACUS. - -USAGE: - -$(basename $SCRIPT_NAME) [options] - -A MORE RECOMMENDED way is to use it by pre-setting workflow scripts: -> gcc-openmpi-openblas environments: toolchain_gnu.sh -> intel-mkl-mpi environments: toolchain_intel.sh -> intel-mpich environments: toolchain_intel_mpich.sh -> AMD environments: toolchain_amd.sh [in development] - -OPTIONS: - --h, --help Show this message. --j Number of processors to use for compilation, if - this option is not present, then the script - automatically tries to determine the number of - processors you have and try to use all of the - processors. 
---no-check-certificate If you encounter "certificate verification" errors - from wget or ones saying that "common name doesn't - match requested host name" while at tarball downloading - stage, then the recommended solution is to install - the newest wget release. Alternatively, you can use - this option to bypass the verification and proceed with - the download. Security wise this should still be okay - as the installation script will check file checksums - after every tarball download. Nevertheless use this - option at your own risk. ---install-all This option will set value of all --with-PKG - options to "install". You can selectively set - --with-PKG to another value again by having the - --with-PKG option placed AFTER this option on the - command line. ---mpi-mode Selects which MPI flavour to use. Available options - are: mpich, openmpi, intelmpi, and no. By selecting "no", - MPI is not supported and disabled. By default the script - will try to determine the flavour based on the MPI library - currently available in your system path. For CRAY (CLE) - systems, the default flavour is mpich. Note that explicitly - setting --with-mpich, --with-openmpi or --with-intelmpi - options to values other than no will also switch --mpi-mode - to the respective mode. ---math-mode Selects which core math library to use. Available options - are: cray, mkl, and openblas. The option "cray" - corresponds to cray libsci, and is the default for CRAY - (CLE) systems. For non-CRAY systems, if env variable MKLROOT - exists then mkl will be default, otherwise openblas is the - default option. Explicitly setting --with-mkl, - or --with-openblas options will switch --math-mode to the - respective modes. ---gpu-ver Selects the GPU architecture for which to compile. Available - options: CUDA architecture number (7.5 / 75, 8.0 / 80, etc) or no - This setting determines the value of nvcc's '-arch' flag. - Default = no. 
---log-lines Number of log file lines dumped in case of a non-zero exit code. - Default = 200 ---target-cpu Compile for the specified target CPU (e.g. haswell or generic), i.e. - do not optimize for the actual host system which is the default (native) ---dry-run Write only config files, but don't actually build packages. ---pack-run Only check and install required packages without actually unpack and build packages - -The --enable-FEATURE options follow the rules: - --enable-FEATURE=yes Enable this particular feature - --enable-FEATURE=no Disable this particular feature - --enable-FEATURE The option keyword alone is equivalent to - --enable-FEATURE=yes - ===== NOTICE: THESE GPU FEATURE IS ON TESTING ===== - --enable-cuda Turn on GPU (CUDA) support (can be combined - with --enable-opencl). - Default = no - --enable-hip Turn on GPU (HIP) support. - Default = no - --enable-opencl Turn on OpenCL (GPU) support. Requires the OpenCL - development packages and runtime. If combined with - --enable-cuda, OpenCL alongside of CUDA is used. - Default = no - --enable-cray Turn on or off support for CRAY Linux Environment - (CLE) manually. By default the script will automatically - detect if your system is CLE, and provide support - accordingly. - -The --with-PKG options follow the rules: - --with-PKG=install Will download the package in \$PWD/build and - install the library package in \$PWD/install. - --with-PKG=system The script will then try to find the required - libraries of the package from the system path - variables such as PATH, LD_LIBRARY_PATH and - CPATH etc. - --with-PKG=no Do not use the package. - --with-PKG=<path> The package will be assumed to be installed in - the given <path>, and be linked accordingly. - --with-PKG The option keyword alone will be equivalent to - --with-PKG=install - - --with-gcc Use the GNU compiler to use to build ABACUS. - Default = system - --with-intel Use the Intel compiler to build ABACUS. 
- Default = system - --with-intel-classic Use the classic Intel compiler (icc, icpc, ifort) to compile ABACUS. - Default = no - --with-ifx Use the new Intel Fortran compiler ifx instead of ifort to compile dependence of ABACUS, along with mpiifx (if --with-intel-classic=no) - Default = yes - --with-amd Use the AMD compiler to build ABACUS. - Default = system - --with-flang Use flang in AMD compiler, which may lead to problem and efficiency loss in ELPA - Default = no - --with-cmake Cmake utilities - Default = install - --with-openmpi OpenMPI, important if you want a parallel version of ABACUS. - Default = system - --with-mpich MPICH, MPI library like OpenMPI. one should - use only one of OpenMPI, MPICH or Intel MPI. - Default = system - --with-mpich-device Select the MPICH device, implies the use of MPICH as MPI library - Default = ch4 - --with-intelmpi Intel MPI, MPI library like OpenMPI. one should - use only one of OpenMPI, MPICH or Intel MPI. - Default = system - --with-intel-mpi-clas Use the classic Intelmpi compiler (mpiicc, mpiicpc and mpiifort) - Default = no - --with-libxc libxc, exchange-correlation library. Needed for - QuickStep DFT and hybrid calculations. - Default = install - --with-fftw FFTW3, library for fast fourier transform - Default = install - --with-mkl Intel Math Kernel Library, which provides LAPACK, and BLAS. - If MKL's FFTW3 interface is suitable (no FFTW-MPI support), - it replaces the FFTW library. If the ScaLAPACK component is - found, it replaces the one specified by --with-scalapack. - Default = system - --with-aocl AMD Optimizing CPU Libraries, which provides LAPACK, BLAS, FFTW, ScaLAPACK - the ScaLAPACK and FFTW can directly use which in AOCL by setting --with-scalapack=system and --with-fftw=system if AOCL in system environment. - related scripts are in development to incorporate scalapack and fftw once for all. 
- Default = system - --with-openblas OpenBLAS is a free high performance LAPACK and BLAS library, - the successor to GotoBLAS. - Default = install - --with-scalapack Parallel linear algebra library, needed for parallel - calculations. - Default = install - --with-elpa Eigenvalue SoLvers for Petaflop-Applications library. - Fast library for large parallel jobs, Especially for ABACUS LCAO - Default = install - --with-cereal Enable cereal for ABACUS LCAO - Default = install - --with-rapidjson Enable rapidjson for ABACUS to read/write json files - Default = install - --with-libtorch Enable libtorch the machine learning framework needed for DeePKS - Default = no - --with-libnpy Enable libnpy the machine learning framework needed for DeePKS - Default = no - --with-libri Enable LibRI for higher-level methods like hybrid functionals, RPA or GW - Default = no - --with-libcomm Enable LibComm for higher-level methods like hybrid functionals, RPA or GW - Default = no - --with-nep Enable NEP (CPU version) for machine learning potentials - Default = no - -FURTHER INSTRUCTIONS - -All packages to be installed locally will be downloaded and built inside -./build, and then installed into package specific directories inside -./install. - -Both ./build and ./install are safe to delete, as they contain -only the files and directories that are generated by this script. However, -once all the packages are installed, and you compile ABACUS using the setup -files provided by this script, then you must keep ./install in exactly -the same location as it was first created, as it contains tools and libraries -your version of ABACUS binary will depend on. - -It should be safe to terminate running of this script in the middle of a -build process. The script will know if a package has been successfully -installed, and will just carry on and recompile and install the last -package it is working on. This is true even if you lose the content of -the entire ./build directory. 
- - +----------------------------------------------------------------+ - | YOU SHOULD ALWAYS SOURCE ./install/setup or ./abacus_env.sh | - | BEFORE YOU RUN ABACUS COMPILED WITH THIS TOOLCHAIN | - +----------------------------------------------------------------+ - -EOF -} - -# ------------------------------------------------------------------------ -# PACKAGE LIST: register all new dependent tools and libs here. Order -# is important, the first in the list gets installed first -# ------------------------------------------------------------------------ -tool_list="gcc intel amd cmake" -mpi_list="mpich openmpi intelmpi" -math_list="mkl aocl openblas" -lib_list="fftw libxc scalapack elpa cereal rapidjson libtorch libnpy libri libcomm nep" -package_list="${tool_list} ${mpi_list} ${math_list} ${lib_list}" -# ------------------------------------------------------------------------ - -# first set everything to __DONTUSE__ -for ii in ${package_list}; do - eval with_${ii}="__DONTUSE__" -done - -# ------------------------------------------------------------------------ -# Work out default settings -# ------------------------------------------------------------------------ - -# tools to turn on by default: -with_gcc="__SYSTEM__" - -# libs to turn on by default, the math and mpi libraries are chosen by there respective modes: -with_fftw="__INSTALL__" -with_libxc="__INSTALL__" -with_scalapack="__INSTALL__" -# default math library setting: openblas -export MATH_MODE="openblas" -with_openblas="__INSTALL__" -with_elpa="__INSTALL__" -with_cereal="__INSTALL__" -with_rapidjson="__INSTALL__" -# with_libtorch="__DONTUSE__" # default -# with_libnpy="__DONTUSE__" -# with_libri="__DONTUSE__" -# with_libcomm="__DONTUSE__" -# for MPI, we try to detect system MPI variant -if (command -v mpiexec > /dev/null 2>&1); then - # check if we are dealing with openmpi, mpich or intelmpi - if (mpiexec --version 2>&1 | grep -s -q "HYDRA"); then - echo "MPI is detected and it appears to be MPICH" - 
export MPI_MODE="mpich" - with_mpich="__SYSTEM__" - elif (mpiexec --version 2>&1 | grep -s -q "OpenRTE"); then - echo "MPI is detected and it appears to be OpenMPI" - export MPI_MODE="openmpi" - with_openmpi="__SYSTEM__" - elif (mpiexec --version 2>&1 | grep -s -q "Intel"); then - echo "MPI is detected and it appears to be Intel MPI" - with_gcc="__DONTUSE__" - with_amd="__DONTUSE__" - with_aocl="__DONTUSE__" - with_intel="__SYSTEM__" - with_intelmpi="__SYSTEM__" - export MPI_MODE="intelmpi" - else # default to mpich - echo "MPI is detected and defaults to MPICH" - export MPI_MODE="mpich" - with_mpich="__SYSTEM__" - fi -else - report_warning $LINENO "No MPI installation detected (ignore this message in Cray Linux Environment or when MPI installation was requested)." - export MPI_MODE="no" -fi - -# default enable options -dry_run="__FALSE__" -enable_tsan="__FALSE__" -enable_opencl="__FALSE__" -enable_cuda="__FALSE__" -enable_hip="__FALSE__" -export intel_classic="no" -# no, then icc->icx, icpc->icpx -# which cannot compile elpa in AMD server -# due to some so-called cross-compile problem -# and will lead to problem in force calculation -# but icx is recommended by intel compiler -# option: --with-intel-classic can change it to yes/no -# QuantumMisaka by 2023.08 -export PACK_RUN="__FALSE__" -export INTELMPI_CLASSIC="no" -export WITH_IFX="yes" # whether ifx is used in oneapi -export WITH_FLANG="no" # whether flang is used in aocc -export OPENMPI_4TH="no" # whether openmpi downgrade -export GPUVER="no" -export MPICH_DEVICE="ch4" -export TARGET_CPU="native" - -# default for log file dump size -export LOG_LINES="200" - -# defaults for CRAY Linux Environment -if [ "${CRAY_LD_LIBRARY_PATH}" ]; then - enable_cray="__TRUE__" - export MATH_MODE="cray" - # Default MPI used by CLE is assumed to be MPICH, in any case - # do not use the installers for the MPI libraries - with_mpich="__DONTUSE__" - with_openmpi="__DONTUSE__" - with_intelmpi="__DONTUSE__" - export MPI_MODE="mpich" - 
# set default value for some installers appropriate for CLE - with_gcc="__DONTUSE__" - with_amd="__DONTUSE__" - with_aocl="__DONTUSE__" - with_intel="__DONTUSE__" - with_fftw="__SYSTEM__" - with_scalapack="__DONTUSE__" -else - enable_cray="__FALSE__" -fi - -# ------------------------------------------------------------------------ -# parse user options -# ------------------------------------------------------------------------ -while [ $# -ge 1 ]; do - case ${1} in - -j) - case "${2}" in - -*) - export NPROCS_OVERWRITE="$(get_nprocs)" - ;; - [0-9]*) - shift - export NPROCS_OVERWRITE="${1}" - ;; - *) - report_error ${LINENO} \ - "The -j flag can only be followed by an integer number, found ${2}." - exit 1 - ;; - esac - ;; - -j[0-9]*) - export NPROCS_OVERWRITE="${1#-j}" - ;; - --no-check-certificate) - export DOWNLOADER_FLAGS="--no-check-certificate" - ;; - --install-all) - # set all package to the default installation status - for ii in ${package_list}; do - if [ "${ii}" != "intel" ] && - [ "${ii}" != "intelmpi" ] && - [ "${ii}" != "amd" ]; then - eval with_${ii}="__INSTALL__" - fi - done - # I'd like to use OpenMPI as default -- QuantumMisaka in 2023.09.17 - export MPI_MODE="openmpi" - ;; - --mpi-mode=*) - user_input="${1#*=}" - case "$user_input" in - mpich) - export MPI_MODE="mpich" - ;; - openmpi) - export MPI_MODE="openmpi" - ;; - intelmpi) - export MPI_MODE="intelmpi" - ;; - no) - export MPI_MODE="no" - ;; - *) - report_error ${LINENO} \ - "--mpi-mode currently only supports openmpi, mpich, intelmpi and no as options" - exit 1 - ;; - esac - ;; - --math-mode=*) - user_input="${1#*=}" - case "$user_input" in - cray) - export MATH_MODE="cray" - ;; - aocl) - export MATH_MODE="aocl" - with_aocl="__SYSTEM__" - with_fftw="__SYSTEM__" - with_scalapack="__SYSTEM__" - ;; - mkl) - export MATH_MODE="mkl" - ;; - openblas) - export MATH_MODE="openblas" - ;; - *) - report_error ${LINENO} \ - "--math-mode currently only supports mkl, aocl, openblas and cray as options" - ;; - 
esac - ;; - --gpu-ver=*) - user_input="${1#*=}" - export GPUVER="${user_input}" - ;; - --target-cpu=*) - user_input="${1#*=}" - export TARGET_CPU="${user_input}" - ;; - --log-lines=*) - user_input="${1#*=}" - export LOG_LINES="${user_input}" - ;; - --dry-run) - dry_run="__TRUE__" - ;; - --pack-run) - PACK_RUN="__TRUE__" - ;; - --enable-tsan*) - enable_tsan=$(read_enable $1) - if [ "${enable_tsan}" = "__INVALID__" ]; then - report_error "invalid value for --enable-tsan, please use yes or no" - exit 1 - fi - ;; - --enable-cuda*) - enable_cuda=$(read_enable $1) - if [ $enable_cuda = "__INVALID__" ]; then - report_error "invalid value for --enable-cuda, please use yes or no" - exit 1 - fi - ;; - --enable-hip*) - enable_hip=$(read_enable $1) - if [ "${enable_hip}" = "__INVALID__" ]; then - report_error "invalid value for --enable-hip, please use yes or no" - exit 1 - fi - ;; - --enable-opencl*) - enable_opencl=$(read_enable $1) - if [ $enable_opencl = "__INVALID__" ]; then - report_error "invalid value for --enable-opencl, please use yes or no" - exit 1 - fi - ;; - --enable-cray*) - enable_cray=$(read_enable $1) - if [ "${enable_cray}" = "__INVALID__" ]; then - report_error "invalid value for --enable-cray, please use yes or no" - exit 1 - fi - ;; - --with-gcc*) - with_gcc=$(read_with "${1}") - ;; - --with-cmake*) - with_cmake=$(read_with "${1}") - ;; - --with-mpich-device=*) - user_input="${1#*=}" - export MPICH_DEVICE="${user_input}" - export MPI_MODE=mpich - ;; - --with-mpich*) - with_mpich=$(read_with "${1}") - if [ "${with_mpich}" != "__DONTUSE__" ]; then - export MPI_MODE=mpich - fi - ;; - --with-4th-openmpi*) - OPENMPI_4TH=$(read_with "${1}" "no") # default new openmpi - ;; - --with-openmpi*) - with_openmpi=$(read_with "${1}") - if [ "${with_openmpi}" != "__DONTUSE__" ]; then - export MPI_MODE=openmpi - fi - ;; - --with-intelmpi*) - with_intelmpi=$(read_with "${1}" "__SYSTEM__") - if [ "${with_intelmpi}" != "__DONTUSE__" ]; then - export MPI_MODE=intelmpi - fi - 
;; - --with-intel-classic*) - intel_classic=$(read_with "${1}" "no") # default new intel compiler - ;; - --with-intel-mpi-clas*) - INTELMPI_CLASSIC=$(read_with "${1}" "no") # default new intel mpi compiler - ;; - --with-intel*) # must be read after items above - with_intel=$(read_with "${1}" "__SYSTEM__") - ;; - --with-ifx*) - WITH_IFX=$(read_with "${1}" "yes") # default yes - ;; - --with-amd*) - with_amd=$(read_with "${1}" "__SYSTEM__") - ;; - --with-flang*) - WITH_FLANG=$(read_with "${1}" "no") - ;; - --with-aocl*) - with_aocl=$(read_with "${1}" "__SYSTEM__") - ;; - --with-libxc*) - with_libxc=$(read_with "${1}") - ;; - --with-fftw*) - with_fftw=$(read_with "${1}") - ;; - --with-mkl*) - with_mkl=$(read_with "${1}" "__SYSTEM__") - if [ "${with_mkl}" != "__DONTUSE__" ]; then - export MATH_MODE="mkl" - fi - ;; - --with-openblas*) - with_openblas=$(read_with "${1}") - if [ "${with_openblas}" != "__DONTUSE__" ]; then - export MATH_MODE="openblas" - fi - ;; - --with-scalapack*) - with_scalapack=$(read_with "${1}") - ;; - --with-elpa*) - with_elpa=$(read_with "${1}") - ;; - --with-libtorch*) - with_libtorch=$(read_with "${1}") - ;; - --with-cereal*) - with_cereal=$(read_with "${1}") - ;; - --with-rapidjson*) - with_rapidjson=$(read_with "${1}") - ;; - --with-libnpy*) - with_libnpy=$(read_with "${1}") - ;; - --with-libri*) - with_libri=$(read_with "${1}") - ;; - --with-libcomm*) - with_libcomm=$(read_with "${1}") - ;; - --with-nep*) - with_nep=$(read_with "${1}") - ;; - --help*) - show_help - exit 0 - ;; - -h*) - show_help - exit 0 - ;; - *) - report_error "Unknown flag: $1" - exit 1 - ;; - esac - shift -done - -# consolidate settings after user input -export ENABLE_TSAN="${enable_tsan}" -export ENABLE_CUDA="${enable_cuda}" -export ENABLE_HIP="${enable_hip}" -export ENABLE_OPENCL="${enable_opencl}" -export ENABLE_CRAY="${enable_cray}" - -# ------------------------------------------------------------------------ -# Check and solve known conflicts before installations 
proceed -# ------------------------------------------------------------------------ -# Check GCC version: -# Quantum Misaka in 2025-05-05 -if [ "${with_gcc}" != "__INSTALL__" ] -then - export GCC_MIN_VERSION=5 - echo "Checking system GCC version for gcc, intel and amd toolchain" - echo "Your System gcc/g++/gfortran version should be consistent" - echo "Minimum required version: ${GCC_MIN_VERSION}" - gcc_version=$(gcc --version | head -n 1 | awk '{print $NF}') - gxx_version=$(g++ --version | head -n 1 | awk '{print $NF}') - gfc_version=$(gfortran --version | head -n 1 | awk '{print $NF}') - echo "Your gcc version: ${gcc_version}" - echo "Your g++ version: ${gxx_version}" - echo "Your gfortran version: ${gfc_version}" - - if [ "${gcc_version}" != "${gxx_version}" ] || [ "${gcc_version}" != "${gfc_version}" ]; then - echo "Your gcc/g++/gfortran version are not consistent !!!" - exit 1 - fi - - extract_major() { - echo $1 | awk -F. '{print $1}' - } - - gcc_major=$(extract_major "${gcc_version}") - if [ "${gcc_major}" -lt "${GCC_MIN_VERSION}" ] - then - echo "Your GCC version do not be larger than ${GCC_MIN_VERSION} !!!" - exit 1 - fi - echo "Your GCC version seems to be enough for ABACUS installation." -fi - -# Compiler conflicts -if [ "${with_intel}" != "__DONTUSE__" ] && [ "${with_gcc}" = "__INSTALL__" ]; then - echo "You have chosen to use the Intel compiler, therefore the installation of the GNU compiler will be skipped." - with_gcc="__SYSTEM__" -fi -if [ "${with_amd}" != "__DONTUSE__" ] && [ "${with_gcc}" = "__INSTALL__" ]; then - echo "You have chosen to use the AMD compiler, therefore the installation of the GNU compiler will be skipped." - with_gcc="__SYSTEM__" -fi -if [ "${with_amd}" != "__DONTUSE__" ] && [ "${with_intel}" != "__DONTUSE__" ]; then - report_error "You have chosen to use the AMD and the Intel compiler to compile dependent packages. Select only one compiler." 
- exit 1 -fi - -# MPI library conflicts -if [ "${MPI_MODE}" = "no" ]; then - if [ "${with_scalapack}" != "__DONTUSE__" ]; then - echo "Not using MPI, so scalapack is disabled." - with_scalapack="__DONTUSE__" - fi - if [ "${with_elpa}" != "__DONTUSE__" ]; then - echo "Not using MPI, so ELPA is disabled." - with_elpa="__DONTUSE__" - fi -else - # if gcc is installed, then mpi needs to be installed too - if [ "${with_gcc}" = "__INSTALL__" ]; then - echo "You have chosen to install the GNU compiler, therefore MPI libraries have to be installed too" - case ${MPI_MODE} in - mpich) - with_mpich="__INSTALL__" - with_openmpi="__DONTUSE__" - ;; - openmpi) - with_mpich="__DONTUSE__" - with_openmpi="__INSTALL__" - ;; - esac - echo "and the use of the Intel compiler and Intel MPI will be disabled." - with_intel="__DONTUSE__" - with_intelmpi="__DONTUSE__" - fi - # Enable only one MPI implementation - case ${MPI_MODE} in - mpich) - with_openmpi="__DONTUSE__" - with_intelmpi="__DONTUSE__" - ;; - openmpi) - with_mpich="__DONTUSE__" - with_intelmpi="__DONTUSE__" - ;; - intelmpi) - with_mpich="__DONTUSE__" - with_openmpi="__DONTUSE__" - ;; - esac -fi -# If MATH_MODE is mkl ,then openblas, scalapack and fftw is not needed -# QuantumMisaka in 2023-09-17 -if [ "${MATH_MODE}" = "mkl" ]; then - if [ "${with_openblas}" != "__DONTUSE__" ]; then - echo "Using MKL, so openblas is disabled." - with_openblas="__DONTUSE__" - fi - if [ "${with_scalapack}" != "__DONTUSE__" ]; then - echo "Using MKL, so scalapack is disabled." - with_scalapack="__DONTUSE__" - fi - if [ "${with_fftw}" != "__DONTUSE__" ]; then - echo "Using MKL, so fftw is disabled." - with_fftw="__DONTUSE__" - fi -fi - -# Select the correct compute number based on the GPU architecture -# QuantumMisaka in 2025-03-19 -export ARCH_NUM="${GPUVER//.}" - -# If CUDA or HIP are enabled, make sure the GPU version has been defined. 
-if [ "${ENABLE_CUDA}" = "__TRUE__" ] || [ "${ENABLE_HIP}" = "__TRUE__" ]; then - if [ "${GPUVER}" = "no" ]; then - report_error "Please choose GPU architecture to compile for with --gpu-ver" - exit 1 - fi - if [[ "$ARCH_NUM" =~ ^[1-9][0-9]*$ ]] || [ $ARCH_NUM = "no" ]; then - echo "Notice: GPU compilation is enabled, and GPU compatibility is set via --gpu-ver to sm_${ARCH_NUM}." - else - report_error ${LINENO} \ - "When GPU compilation is enabled, the --gpu-ver variable should be properly set regarding to GPU compatibility. For check your GPU compatibility, visit https://developer.nvidia.com/cuda-gpus. For example: A100 -> 8.0 (or 80), V100 -> 7.0 (or 70), 4090 -> 8.9 (or 89)" - exit 1 - fi -fi - -# ABACUS itself and some dependencies require cmake. -if [ "${with_cmake}" = "__DONTUSE__" ]; then - report_error "CMake is required for ABACUS and some dependencies. Please enable it." - exit 1 -fi - - -# ------------------------------------------------------------------------ -# Preliminaries -# ------------------------------------------------------------------------ - -mkdir -p ${INSTALLDIR} - -# ------------------------------------------------------------------------ -# Start writing setup file -# ------------------------------------------------------------------------ -cat << EOF > "$SETUPFILE" -#!/bin/bash -source "${SCRIPTDIR}/tool_kit.sh" -export ABACUS_TOOLCHAIN_OPTIONS="${TOOLCHAIN_OPTIONS}" -EOF - -# ------------------------------------------------------------------------ -# Special settings for CRAY Linux Environment (CLE) -# TODO: CLE should be handle like gcc or Intel using a with_cray flag and -# this section should be moved to a separate file install_cray. 
-# ------------------------------------------------------------------------ -if [ "${ENABLE_CRAY}" = "__TRUE__" ]; then - echo "------------------------------------------------------------------------" - echo "CRAY Linux Environment (CLE) is detected" - echo "------------------------------------------------------------------------" - # add cray paths to system search path - export LIB_PATHS="CRAY_LD_LIBRARY_PATH ${LIB_PATHS}" - # set compilers to CLE wrappers - check_command cc - check_command ftn - check_command CC - export CC="cc" - export CXX="CC" - export FC="ftn" - export F90="${FC}" - export F77="${FC}" - export MPICC="${CC}" - export MPICXX="${CXX}" - export MPIFC="${FC}" - export MPIFORT="${MPIFC}" - export MPIF77="${MPIFC}" - # CRAY libsci should contains core math libraries, scalapack - # doesn't need LDFLAGS or CFLAGS, nor do the one need to - # explicitly link the math and scalapack libraries, as all is - # taken care of by the cray compiler wrappers. - if [ "$with_scalapack" = "__DONTUSE__" ]; then - export CP_DFLAGS="${CP_DFLAGS} IF_MPI(-D__SCALAPACK|)" - fi - case $MPI_MODE in - mpich) - if [ "$MPICH_DIR" ]; then - cray_mpich_include_path="$MPICH_DIR/include" - cray_mpich_lib_path="$MPICH_DIR/lib" - export INCLUDE_PATHS="$INCLUDE_PATHS cray_mpich_include_path" - export LIB_PATHS="$LIB_PATHS cray_mpich_lib_path" - fi - if [ "$with_mpich" = "__DONTUSE__" ]; then - add_include_from_paths MPI_CFLAGS "mpi.h" $INCLUDE_PATHS - add_include_from_paths MPI_LDFLAGS "libmpi.*" $LIB_PATHS - export MPI_CFLAGS - export MPI_LDFLAGS - export MPI_LIBS=" " - export CP_DFLAGS="${CP_DFLAGS} IF_MPI(-D__parallel|)" - fi - ;; - openmpi) - if [ "$with_openmpi" = "__DONTUSE__" ]; then - add_include_from_paths MPI_CFLAGS "mpi.h" $INCLUDE_PATHS - add_include_from_paths MPI_LDFLAGS "libmpi.*" $LIB_PATHS - export MPI_CFLAGS - export MPI_LDFLAGS - export MPI_LIBS="-lmpi -lmpi_cxx" - export CP_DFLAGS="${CP_DFLAGS} IF_MPI(-D__parallel|)" - fi - ;; - intelmpi) - if [ "$with_intelmpi" 
= "__DONTUSE__" ]; then - with_gcc="__DONTUSE__" - with_intel="__SYSTEM__" - add_include_from_paths MPI_CFLAGS "mpi.h" $INCLUDE_PATHS - add_include_from_paths MPI_LDFLAGS "libmpi.*" $LIB_PATHS - export MPI_CFLAGS - export MPI_LDFLAGS - export MPI_LIBS="-lmpi -lmpi_cxx" - export CP_DFLAGS="${CP_DFLAGS} IF_MPI(-D__parallel|)" - fi - ;; - esac - check_lib -lz - check_lib -ldl - export CRAY_EXTRA_LIBS="-lz -ldl" - # the space is intentional, so that the variable is non-empty and - # can pass require_env checks - export SCALAPACK_LDFLAGS=" " - export SCALAPACK_LIBS=" " -fi - -# ------------------------------------------------------------------------ -# Installing tools required for building ABACUS and associated libraries -# ------------------------------------------------------------------------ - -echo "Compiling with $(get_nprocs) processes for target ${TARGET_CPU}." - -write_toolchain_env ${INSTALLDIR} - -# write toolchain config -echo "tool_list=\"${tool_list}\"" > ${INSTALLDIR}/toolchain.conf -for ii in ${package_list}; do - install_mode="$(eval echo \${with_${ii}})" - echo "with_${ii}=\"${install_mode}\"" >> ${INSTALLDIR}/toolchain.conf -done - -# ------------------------------------------------------------------------ -# Build packages unless dry-run or pack-run mode is enabled. -# ------------------------------------------------------------------------ -if [ "${dry_run}" = "__TRUE__" ]; then - echo "Wrote only configuration files (--dry-run)." -else - echo "# Leak suppressions" > ${INSTALLDIR}/lsan.supp - ./scripts/stage0/install_stage0.sh - ./scripts/stage1/install_stage1.sh - ./scripts/stage2/install_stage2.sh - ./scripts/stage3/install_stage3.sh - ./scripts/stage4/install_stage4.sh - -cat << EOF -========================== usage ========================= -Done! 
-To use the installed tools and libraries and ABACUS version -compiled with it you will first need to execute at the prompt: - source ${SETUPFILE} -To build ABACUS by gnu-toolchain, just use: - ./build_abacus_gnu.sh -To build ABACUS by intel-toolchain, just use: - ./build_abacus_intel.sh -To build ABACUS by amd-toolchain in gcc-aocl, just use: - ./build_abacus_gcc-aocl.sh -To build ABACUS by amd-toolchain in aocc-aocl, just use: - ./build_abacus_aocc-aocl.sh -or you can modify the builder scripts to suit your needs. -EOF - -fi - -#EOF diff --git a/toolchain/install_abacus_toolchain_new.sh b/toolchain/install_abacus_toolchain_new.sh index cb4e0708c8a..0f0d85d42e3 100755 --- a/toolchain/install_abacus_toolchain_new.sh +++ b/toolchain/install_abacus_toolchain_new.sh @@ -20,7 +20,6 @@ export SCRIPTDIR="${ROOTDIR}/scripts" export BUILDDIR="${ROOTDIR}/build" export INSTALLDIR="${ROOTDIR}/install" export SETUPFILE="${INSTALLDIR}/setup" -export SHA256_CHECKSUM="${SCRIPTDIR}/checksums.sha256" # Make a copy of all options for $SETUPFILE TOOLCHAIN_OPTIONS="$@" diff --git a/toolchain/scripts/VERSION b/toolchain/scripts/VERSION index 6f4471799a0..4a7fd7bbc33 100644 --- a/toolchain/scripts/VERSION +++ b/toolchain/scripts/VERSION @@ -1,2 +1,2 @@ # version file to force a rebuild of the entire toolchain -VERSION="2025.3" \ No newline at end of file +VERSION="2026.1" diff --git a/toolchain/scripts/lib/config_manager.sh b/toolchain/scripts/lib/config_manager.sh index 7e1afcef6e5..edbbbe2fe73 100644 --- a/toolchain/scripts/lib/config_manager.sh +++ b/toolchain/scripts/lib/config_manager.sh @@ -609,12 +609,16 @@ config_export_to_env() { # Export all configuration values as environment variables for key in "${!CONFIG_CACHE[@]}"; do - export "$key"="${CONFIG_CACHE[$key]}" + case "$key" in + enable_*) ;; + *) export "$key"="${CONFIG_CACHE[$key]}" ;; + esac done - # Backward compatibility for stage scripts expecting uppercase GPU flags - # Installers (e.g., stage3/install_elpa.sh) read 
ENABLE_CUDA, not enable_cuda export ENABLE_CUDA="${CONFIG_CACHE[enable_cuda]}" + export ENABLE_HIP="${CONFIG_CACHE[enable_hip]}" + export ENABLE_OPENCL="${CONFIG_CACHE[enable_opencl]}" + export ENABLE_CRAY="${CONFIG_CACHE[enable_cray]:-"__FALSE__"}" # Export package list variables export tool_list diff --git a/toolchain/scripts/lib/config_validator.sh b/toolchain/scripts/lib/config_validator.sh index 9af8736d3a2..5064637f2b3 100644 --- a/toolchain/scripts/lib/config_validator.sh +++ b/toolchain/scripts/lib/config_validator.sh @@ -404,6 +404,22 @@ validate_system_requirements() { "This will install GCC ${gcc_ver:-13.2.0} (>= $gcc_min_version.x) with full C++17/C++20 support." return fi + + local cuda_major="" + if command -v nvidia-smi &> /dev/null; then + local nvidia_banner=$(nvidia-smi 2>/dev/null | head -n 3 | tr '\n' ' ') + local cuda_version=$(echo "$nvidia_banner" | sed -n 's/.*CUDA Version:[[:space:]]*\([0-9.]*\).*/\1/p') + cuda_major=$(echo "$cuda_version" | awk -F. '{print $1}') + fi + if [[ "$gcc_major" -ge 14 ]] && [[ -n "$cuda_major" ]] && [[ "$cuda_major" =~ ^[0-9]+$ ]] && [[ "$cuda_major" -lt 13 ]]; then + add_validation_warning_group "gcc_cuda_compat_risk" \ + "Potential GCC/CUDA incompatibility risk detected:" \ + " gcc: $gcc_version (>= 14)" \ + " cuda: $cuda_major.x (< 13, from nvidia-smi)" \ + "" \ + "This combination may cause CUDA-related compilation failures." \ + "Consider upgrading CUDA (>= 13) or using a compatible GCC toolchain." 
+  fi

   # Success - add informational message
   add_validation_info "System GCC toolchain validated: version $gcc_version (>= $gcc_min_version.x required)"
diff --git a/toolchain/scripts/lib/user_interface.sh b/toolchain/scripts/lib/user_interface.sh
index 2c18d7bf3f2..08a75f40450 100644
--- a/toolchain/scripts/lib/user_interface.sh
+++ b/toolchain/scripts/lib/user_interface.sh
@@ -777,7 +777,24 @@ ui_show_summary() {
     fi
   fi
 
-  echo "  └─ GPU: $gpu_info"
+  local nvidia_driver="unavailable"
+  local cuda_version="unavailable"
+  if command -v nvidia-smi &> /dev/null; then
+    nvidia_driver=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader 2>/dev/null | head -n1 | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
+    if [[ -z "$nvidia_driver" ]]; then
+      nvidia_driver="unavailable"
+    fi
+    local nvidia_banner=$(nvidia-smi 2>/dev/null | head -n 3 | tr '\n' ' ')
+    cuda_version=$(echo "$nvidia_banner" | sed -n 's/.*CUDA Version:[[:space:]]*\([0-9.]*\).*/\1/p')
+    cuda_version=$(echo "$cuda_version" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//')
+    if [[ -z "$cuda_version" ]]; then
+      cuda_version="unavailable"
+    fi
+  fi
+
+  echo "  ├─ GPU: $gpu_info"
+  echo "  ├─ NVIDIA Driver: $nvidia_driver"
+  echo "  └─ CUDA Version: $cuda_version"
   echo ""
 
   # Configuration box with aligned formatting
@@ -1176,4 +1193,4 @@ ui_check_system_requirements() {
     ui_success "All required system tools are available"
     return 0
   fi
-}
\ No newline at end of file
+}
diff --git a/toolchain/scripts/stage3/install_elpa.sh b/toolchain/scripts/stage3/install_elpa.sh
index a3d981fa690..ab343c1a793 100755
--- a/toolchain/scripts/stage3/install_elpa.sh
+++ b/toolchain/scripts/stage3/install_elpa.sh
@@ -115,7 +115,7 @@ case "$with_elpa" in
     fi
     for TARGET in "cpu" "nvidia"; do
-      # Accept both uppercase and lowercase GPU enable flags for compatibility
-      gpu_enabled="${ENABLE_CUDA:-${enable_cuda}}"
+      # config_export_to_env now exports only the canonical uppercase ENABLE_CUDA
+      gpu_enabled="${ENABLE_CUDA}"
       [ "$TARGET" = "nvidia" ] && [ "$gpu_enabled" != "__TRUE__" ] && continue
       # disable cpu if cuda is
enabled, only install one [ "$TARGET" != "nvidia" ] && [ "$gpu_enabled" = "__TRUE__" ] && continue diff --git a/toolchain/scripts/stage4/install_cereal.sh b/toolchain/scripts/stage4/install_cereal.sh index f087aff9da2..b03861d4c19 100755 --- a/toolchain/scripts/stage4/install_cereal.sh +++ b/toolchain/scripts/stage4/install_cereal.sh @@ -30,8 +30,13 @@ if [[ -z "$version_suffix" && -n "${ABACUS_TOOLCHAIN_VERSION_SUFFIX}" ]]; then fi # Load package variables with appropriate version load_package_vars "cereal" "$version_suffix" -dirname="cereal-${cereal_ver}" -filename="cereal-${cereal_ver}.tar.gz" +if [[ "${cereal_ver}" =~ ^[0-9a-f]{40}$ ]]; then + short_ver="${cereal_ver:0:7}" +else + short_ver="${cereal_ver}" +fi +dirname="cereal-${short_ver}" +filename="cereal-${short_ver}.tar.gz" source "${INSTALLDIR}"/toolchain.conf source "${INSTALLDIR}"/toolchain.env @@ -77,7 +82,7 @@ case "$with_cereal" in cp -r $dirname/* "${pkg_install_dir}/" write_checksums "${install_lock_file}" "${SCRIPT_DIR}/stage4/$(basename ${SCRIPT_NAME})" fi - CEREAL_CFLAGS="-I'${pkg_install_dir}'" + CEREAL_CFLAGS="-I'${pkg_install_dir}/include'" ;; __SYSTEM__) echo "==================== Finding CEREAL from system paths ====================" @@ -105,7 +110,7 @@ case "$with_cereal" in echo "==================== Linking CEREAL to user paths ====================" pkg_install_dir="${with_cereal}" check_dir "${pkg_install_dir}" - CEREAL_CFLAGS="-I'${pkg_install_dir}'" + CEREAL_CFLAGS="-I'${pkg_install_dir}/include'" ;; esac if [ "$with_cereal" != "__DONTUSE__" ]; then diff --git a/toolchain/scripts/stage4/install_libcomm.sh b/toolchain/scripts/stage4/install_libcomm.sh index 37c4ce69ab9..c7b14f075e6 100755 --- a/toolchain/scripts/stage4/install_libcomm.sh +++ b/toolchain/scripts/stage4/install_libcomm.sh @@ -34,8 +34,13 @@ if [[ -z "$version_suffix" && -n "${ABACUS_TOOLCHAIN_VERSION_SUFFIX}" ]]; then fi # Load package variables with appropriate version load_package_vars "libcomm" "$version_suffix" 
-dirname="LibComm-${libcomm_ver}" -filename="LibComm-${libcomm_ver}.tar.gz" +if [[ "${libcomm_ver}" =~ ^[0-9a-f]{40}$ ]]; then + short_ver="${libcomm_ver:0:7}" +else + short_ver="${libcomm_ver}" +fi +dirname="LibComm-${short_ver}" +filename="LibComm-${short_ver}.tar.gz" source "${INSTALLDIR}"/toolchain.conf source "${INSTALLDIR}"/toolchain.env @@ -79,7 +84,7 @@ case "$with_libcomm" in cp -r $dirname/* "${pkg_install_dir}/" write_checksums "${install_lock_file}" "${SCRIPT_DIR}/stage4/$(basename ${SCRIPT_NAME})" fi - LIBCOMM_CFLAGS="-I'${pkg_install_dir}'" + LIBCOMM_CFLAGS="-I'${pkg_install_dir}/include'" ;; __SYSTEM__) echo "==================== Finding LIBCOMM from system paths ====================" @@ -107,7 +112,7 @@ case "$with_libcomm" in echo "==================== Linking LIBCOMM to user paths ====================" pkg_install_dir="${with_libcomm}" check_dir "${pkg_install_dir}" - LIBCOMM_CFLAGS="-I'${pkg_install_dir}'" + LIBCOMM_CFLAGS="-I'${pkg_install_dir}/include'" ;; esac if [ "$with_libcomm" != "__DONTUSE__" ]; then diff --git a/toolchain/scripts/stage4/install_libnpy.sh b/toolchain/scripts/stage4/install_libnpy.sh index 5b5bacc2b05..bdd7c2b3959 100755 --- a/toolchain/scripts/stage4/install_libnpy.sh +++ b/toolchain/scripts/stage4/install_libnpy.sh @@ -71,7 +71,7 @@ case "$with_libnpy" in cp -r $dirname/* "${pkg_install_dir}/" write_checksums "${install_lock_file}" "${SCRIPT_DIR}/stage4/$(basename ${SCRIPT_NAME})" fi - LIBNPY_CFLAGS="-I'${pkg_install_dir}'" + LIBNPY_CFLAGS="-I'${pkg_install_dir}/include'" ;; __SYSTEM__) echo "==================== Finding LIBNPY from system paths ====================" @@ -99,7 +99,7 @@ case "$with_libnpy" in echo "==================== Linking LIBNPY to user paths ====================" pkg_install_dir="${with_libnpy}" check_dir "${pkg_install_dir}" - LIBNPY_CFLAGS="-I'${pkg_install_dir}'" + LIBNPY_CFLAGS="-I'${pkg_install_dir}/include'" ;; esac if [ "$with_libnpy" != "__DONTUSE__" ]; then diff --git 
a/toolchain/scripts/stage4/install_libri.sh b/toolchain/scripts/stage4/install_libri.sh index 0efea870ff3..a3dd10422d6 100755 --- a/toolchain/scripts/stage4/install_libri.sh +++ b/toolchain/scripts/stage4/install_libri.sh @@ -34,6 +34,11 @@ if [[ -z "$version_suffix" && -n "${ABACUS_TOOLCHAIN_VERSION_SUFFIX}" ]]; then fi # Load package variables with appropriate version load_package_vars "libri" "$version_suffix" +if [[ "${libri_ver}" =~ ^[0-9a-f]{40}$ ]]; then + short_ver="${libri_ver:0:7}" +else + short_ver="${libri_ver}" +fi source "${INSTALLDIR}"/toolchain.conf source "${INSTALLDIR}"/toolchain.env @@ -45,7 +50,7 @@ cd "${BUILDDIR}" case "$with_libri" in __INSTALL__) echo "==================== Installing LIBRI ====================" - dirname="LibRI-${libri_ver}" + dirname="LibRI-${short_ver}" pkg_install_dir="${INSTALLDIR}/$dirname" #pkg_install_dir="${HOME}/lib/libri/${libri_ver}" install_lock_file="${pkg_install_dir}/install_successful" @@ -57,7 +62,7 @@ case "$with_libri" in else url="https://codeload.github.com/abacusmodeling/LibRI/tar.gz/v${libri_ver}" fi - filename="LibRI-${libri_ver}.tar.gz" + filename="LibRI-${short_ver}.tar.gz" if verify_checksums "${install_lock_file}"; then echo "$dirname is already installed, skipping it." 
else @@ -78,7 +83,7 @@ case "$with_libri" in cp -r $dirname/* "${pkg_install_dir}/" write_checksums "${install_lock_file}" "${SCRIPT_DIR}/stage4/$(basename ${SCRIPT_NAME})" fi - LIBRI_CFLAGS="-I'${pkg_install_dir}'" + LIBRI_CFLAGS="-I'${pkg_install_dir}/include'" ;; __SYSTEM__) echo "==================== Finding LIBRI from system paths ====================" @@ -106,7 +111,7 @@ case "$with_libri" in echo "==================== Linking LIBRI to user paths ====================" pkg_install_dir="${with_libri}" check_dir "${pkg_install_dir}" - LIBRI_CFLAGS="-I'${pkg_install_dir}'" + LIBRI_CFLAGS="-I'${pkg_install_dir}/include'" ;; esac if [ "$with_libri" != "__DONTUSE__" ]; then diff --git a/toolchain/scripts/stage4/install_rapidjson.sh b/toolchain/scripts/stage4/install_rapidjson.sh index 1529911e6f1..6cb560af429 100755 --- a/toolchain/scripts/stage4/install_rapidjson.sh +++ b/toolchain/scripts/stage4/install_rapidjson.sh @@ -32,6 +32,11 @@ if [[ -z "$version_suffix" && -n "${ABACUS_TOOLCHAIN_VERSION_SUFFIX}" ]]; then fi # Load package variables with appropriate version load_package_vars "rapidjson" "$version_suffix" +if [[ "${rapidjson_ver}" =~ ^[0-9a-f]{40}$ ]]; then + short_ver="${rapidjson_ver:0:7}" +else + short_ver="${rapidjson_ver}" +fi source "${INSTALLDIR}"/toolchain.conf source "${INSTALLDIR}"/toolchain.env @@ -44,7 +49,7 @@ cd "${BUILDDIR}" case "$with_rapidjson" in __INSTALL__) echo "==================== Installing RapidJSON ====================" - dirname="rapidjson-${rapidjson_ver}" + dirname="rapidjson-${short_ver}" pkg_install_dir="${INSTALLDIR}/$dirname" #pkg_install_dir="${HOME}/lib/rapidjson/${rapidjson_ver}" install_lock_file="${pkg_install_dir}/install_successful" @@ -56,7 +61,7 @@ case "$with_rapidjson" in else url="https://codeload.github.com/Tencent/rapidjson/tar.gz/v${rapidjson_ver}" fi - filename="rapidjson-${rapidjson_ver}.tar.gz" + filename="rapidjson-${short_ver}.tar.gz" if verify_checksums "${install_lock_file}"; then echo "$dirname is 
already installed, skipping it." else diff --git a/toolchain/scripts/tool_kit.sh b/toolchain/scripts/tool_kit.sh index ad86f8404c8..b7d377cef9d 100755 --- a/toolchain/scripts/tool_kit.sh +++ b/toolchain/scripts/tool_kit.sh @@ -128,6 +128,7 @@ You can manually install requirements packages via: 1. Download from www.cp2k.org/static/downloads (for OpenBLAS, OpenMPI and Others) 2. Download from github.com (especially for CEREAL, RapidJSON, libnpy, LibRI and other stage4 packages) 3. for Intel-oneAPI and AMD AOCC/AOCL, please contact your server manager or visit their official website +4. For users in China, you can try Gitee mirror: git clone https://gitee.com/jamesmisaka/abacus_toolchain_build.git EOF } diff --git a/toolchain/toolchain_aocc-aocl.sh b/toolchain/toolchain_aocc-aocl.sh index edfe1f20ae8..bfc4b4bc1a2 100755 --- a/toolchain/toolchain_aocc-aocl.sh +++ b/toolchain/toolchain_aocc-aocl.sh @@ -68,8 +68,8 @@ PACK_RUN_MODE="no" # Set to "yes" to enable pack-run mode # Refer to scripts/package_versions.sh for specific version numbers CMAKE_VERSION="main" # main=3.31.7, alt=3.30.5 -OPENMPI_VERSION="main" # main=5.0.8, alt=4.1.6 -ELPA_VERSION="main" # main=2025.06.001, alt=2024.05.001 +OPENMPI_VERSION="main" # main=5.0.10, alt=4.1.8 +ELPA_VERSION="main" # main=2026.02.001, alt=2024.05.001 LIBXC_VERSION="main" # main=7.0.0, alt=6.2.2 # Optional Libraries LIBTORCH_VERSION="main" # main=2.1.2, alt=1.12.1 (use alt for older GLIBC) diff --git a/toolchain/toolchain_gcc-aocl.sh b/toolchain/toolchain_gcc-aocl.sh index 7a21670ab9f..f77b533989c 100755 --- a/toolchain/toolchain_gcc-aocl.sh +++ b/toolchain/toolchain_gcc-aocl.sh @@ -65,8 +65,8 @@ PACK_RUN_MODE="no" # Set to "yes" to enable pack-run mode # Refer to scripts/package_versions.sh for specific version numbers CMAKE_VERSION="main" # main=3.31.7, alt=3.30.5 -OPENMPI_VERSION="main" # main=5.0.8, alt=4.1.6 -ELPA_VERSION="main" # main=2025.06.001, alt=2024.05.001 +OPENMPI_VERSION="main" # main=5.0.10, alt=4.1.8 
+ELPA_VERSION="main" # main=2026.02.001, alt=2024.05.001 LIBXC_VERSION="main" # main=7.0.0, alt=6.2.2 # Optional Libraries LIBTORCH_VERSION="main" # main=2.1.2, alt=1.12.1 (use alt for older GLIBC) diff --git a/toolchain/toolchain_gcc-mkl.sh b/toolchain/toolchain_gcc-mkl.sh index bb88f00fad0..7f7b9da8e17 100755 --- a/toolchain/toolchain_gcc-mkl.sh +++ b/toolchain/toolchain_gcc-mkl.sh @@ -65,8 +65,8 @@ PACK_RUN_MODE="no" # Set to "yes" to enable pack-run mode # Refer to scripts/package_versions.sh for specific version numbers CMAKE_VERSION="main" # main=3.31.7, alt=3.30.5 -OPENMPI_VERSION="main" # main=5.0.8, alt=4.1.6 -ELPA_VERSION="main" # main=2025.06.001, alt=2024.05.001 +OPENMPI_VERSION="main" # main=5.0.10, alt=4.1.8 +ELPA_VERSION="main" # main=2026.02.001, alt=2024.05.001 LIBXC_VERSION="main" # main=7.0.0, alt=6.2.2 # Optional Libraries LIBTORCH_VERSION="main" # main=2.1.2, alt=1.12.1 (use alt for older GLIBC) diff --git a/toolchain/toolchain_gnu.sh b/toolchain/toolchain_gnu.sh index 44cfbe4b362..456d81dacc5 100755 --- a/toolchain/toolchain_gnu.sh +++ b/toolchain/toolchain_gnu.sh @@ -65,12 +65,12 @@ PACK_RUN_MODE="no" # Set to "yes" to enable pack-run mode # Refer to scripts/package_versions.sh for specific version numbers CMAKE_VERSION="main" # main=3.31.7, alt=3.30.5 -OPENMPI_VERSION="main" # main=5.0.8, alt=4.1.6 -MPICH_VERSION="main" # main=4.1.6, alt=4.1.5 -OPENBLAS_VERSION="main" # main=0.3.30, alt=0.3.27 -ELPA_VERSION="main" # main=2025.06.001, alt=2024.05.001 +OPENMPI_VERSION="main" # main=5.0.10, alt=4.1.8 +MPICH_VERSION="main" # main=5.0.1, alt=4.3.2 +OPENBLAS_VERSION="main" # main=0.3.33, alt=0.3.30 +ELPA_VERSION="main" # main=2026.02.001, alt=2024.05.001 LIBXC_VERSION="main" # main=7.0.0, alt=6.2.2 -SCALAPACK_VERSION="main" # main=2.2.2, alt=2.2.1 +SCALAPACK_VERSION="main" # main=2.2.3, alt=2.2.1 # Optional Libraries LIBTORCH_VERSION="main" # main=2.1.2, alt=1.12.1 (use alt for older GLIBC) # Note: main(2.1.2) version of LibTorch need glibc > 2.27 
diff --git a/toolchain/toolchain_intel.sh b/toolchain/toolchain_intel.sh index 29138b6e5a3..1f4dedbc215 100755 --- a/toolchain/toolchain_intel.sh +++ b/toolchain/toolchain_intel.sh @@ -85,7 +85,7 @@ INTELMPI_CLASSIC="no" # Set to "yes" to use classic Intel MPI wrappers (mpiicc # Refer to scripts/package_versions.sh for specific version numbers CMAKE_VERSION="main" # main=3.31.7, alt=3.30.5 -ELPA_VERSION="alt" # main=2025.06.001, alt=2024.05.001 for intel oneapi<2024.2 +ELPA_VERSION="alt" # main=2026.02.001, alt=2024.05.001 for intel oneapi<2024.2 LIBXC_VERSION="main" # main=7.0.0, alt=6.2.2 # Optional Libraries LIBTORCH_VERSION="main" # main=2.1.2, alt=1.12.1 (use alt for older GLIBC)