diff --git a/abs.yaml b/abs.yaml
index 963f96f6..ae90f8f5 100644
--- a/abs.yaml
+++ b/abs.yaml
@@ -2,4 +2,7 @@
 # variant, so it's specified for both.
 extra_labels_for_os:
   osx-arm64: [ventura]
-aggregate_check: false
\ No newline at end of file
+aggregate_check: false
+
+channels:
+  - https://staging.continuum.io/prefect/fs/sympy-feedstock/pr10/3afd78c
\ No newline at end of file
diff --git a/recipe/meta.yaml b/recipe/meta.yaml
index bebe2841..5aec6848 100644
--- a/recipe/meta.yaml
+++ b/recipe/meta.yaml
@@ -74,6 +74,7 @@ build:
     - python *  # [megabuild]
     - numpy *   # [megabuild]
   skip: True  # [py<39]
+  skip: True  # [win]
 
 requirements:
   # Keep this list synchronized (except for python*, numpy*) in outputs
@@ -175,6 +176,9 @@ requirements:
     - eigen 3.3.7
     - astunparse 1.6.3
     - opentelemetry-api
+  # satisfy overlinking checks
+  run:
+    - {{ pin_compatible('intel-openmp') }}  # [blas_impl == "mkl"]
 
 # these tests are for the libtorch output below, but due to
 # a particularity of conda-build, that output is defined in
@@ -192,6 +196,9 @@ test:
 
 outputs:
   - name: libtorch
+    build:
+      missing_dso_whitelist:
+        - $RPATH/ld64.so.1  # [s390x]
   - name: pytorch
     build:
       string: gpu_cuda{{ cuda_compiler_version | replace('.', '') }}_py{{ CONDA_PY }}h{{ PKG_HASH }}_{{ PKG_BUILDNUM }}  # [gpu_variant == "cuda-12"]
@@ -201,11 +208,14 @@ outputs:
         - torchrun = torch.distributed.run:main
       ignore_run_exports:  # [osx]
         - libuv  # [osx]
+      missing_dso_whitelist:
+        - $RPATH/ld64.so.1  # [s390x]
       detect_binary_files_with_prefix: false
       run_exports:
         - {{ pin_subpackage('pytorch', max_pin='x.x') }}
         - {{ pin_subpackage('libtorch', max_pin='x.x') }}
       skip: True  # [py<39]
+      skip: True  # [win]
       script: build_pytorch.sh   # [unix]
       script: build_pytorch.bat  # [win]
 
@@ -355,7 +365,11 @@ outputs:
       - mock  # [linux]
       - pip
       - expecttest
-      - xmlrunner
+      # unittest-xml-reporting provides xmlrunner
+      - unittest-xml-reporting
+      - pytest-rerunfailures
+      - pytest-flakefinder
+      - pytest-xdist
     imports:
       - torch
     source_files: