diff --git a/.coveragerc b/.coveragerc
index ced2204062..5937dce43a 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,6 +1,7 @@
[run]
branch = True
source = cwltool
+omit = cwltool/run_job.py
[report]
exclude_lines =
diff --git a/.dockerignore b/.dockerignore
index 2d062154a0..fc71c15e3a 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,6 +1,28 @@
+.coverage
+coverage.xml
.tox/
.eggs/
.vscode/
build/
dist/
+.swp
+.mypy_cache/
+.git/
+.pytest_cache/
+*.whl
+env*/
+testenv*/
+*.img
+*.sif
+*.so
+.github/
+cwltool/*.so
*.Dockerfile
+build-cwltool-docker.sh
+__pycache__/
+*/__pycache__/
+**/__pycache__/
+*.egg-info/
+*.orig
+.dockerignore
+cache*
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..c0a96e68ee
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,16 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+ - package-ecosystem: "pip" # See documentation for possible values
+ directory: "/" # Location of package manifests
+ schedule:
+ interval: "daily"
+ # Maintain dependencies for GitHub Actions
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "daily"
diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml
new file mode 100644
index 0000000000..b3945fb64e
--- /dev/null
+++ b/.github/workflows/ci-tests.yml
@@ -0,0 +1,189 @@
+name: Continuous integration tests
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+ workflow_dispatch:
+
+env:
+ singularity_version: 3.6.4
+
+jobs:
+
+ tox:
+ name: CI tests via Tox
+
+ runs-on: ubuntu-20.04
+
+ strategy:
+ matrix:
+ py-ver-major: [3]
+ py-ver-minor: [6, 7, 8, 9]
+ step: [lint, unit, bandit, mypy]
+
+ env:
+ py-semver: ${{ format('{0}.{1}', matrix.py-ver-major, matrix.py-ver-minor) }}
+ TOXENV: ${{ format('py{0}{1}-{2}', matrix.py-ver-major, matrix.py-ver-minor, matrix.step) }}
+
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+
+ - name: Set up Singularity
+ if: ${{ matrix.step == 'unit' || matrix.step == 'mypy' }}
+ uses: eWaterCycle/setup-singularity@v6
+ with:
+ singularity-version: ${{ env.singularity_version }}
+
+ - name: Give the test runner user a name to make provenance happy.
+ if: ${{ matrix.step == 'unit' || matrix.step == 'mypy' }}
+ run: sudo usermod -c 'CI Runner' $(whoami)
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ env.py-semver }}
+
+ - name: Cache for pip
+ uses: actions/cache@v2
+ with:
+ path: ~/.cache/pip
+ key: ${{ runner.os }}-pip-${{ matrix.step }}-${{ hashFiles('requirements.txt', 'tox.ini') }}
+
+ - name: Upgrade setuptools and install tox
+ run: |
+ pip install -U pip setuptools wheel
+ pip install tox tox-gh-actions
+
+ - name: MyPy cache
+ if: ${{ matrix.step == 'mypy' }}
+ uses: actions/cache@v2
+ with:
+ path: .mypy_cache/${{ env.py-semver }}
+ key: mypy-${{ env.py-semver }}
+
+ - name: Test with tox
+ run: tox
+
+ - name: Upload coverage to Codecov
+ if: ${{ matrix.step == 'unit' }}
+ uses: codecov/codecov-action@v2.0.3
+ with:
+ fail_ci_if_error: true
+
+ tox-style:
+ name: CI linters via Tox
+
+ runs-on: ubuntu-20.04
+
+ strategy:
+ matrix:
+ step: [lintreadme, shellcheck, pydocstyle]
+
+ env:
+ py-semver: 3.9
+ TOXENV: ${{ format('py39-{0}', matrix.step) }}
+
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: ${{ env.py-semver }}
+
+ - name: Cache for pip
+ uses: actions/cache@v2
+ with:
+ path: ~/.cache/pip
+ key: ${{ runner.os }}-pip-${{ matrix.step }}-${{ hashFiles('requirements.txt') }}
+
+ - name: Upgrade setuptools and install tox
+ run: |
+ pip install -U pip setuptools wheel
+ pip install tox tox-gh-actions
+
+ - if: ${{ matrix.step == 'pydocstyle' && github.event_name == 'pull_request'}}
+ name: Create local branch for diff-quality for PRs
+ run: git branch ${{github.base_ref}} origin/${{github.base_ref}}
+
+ - name: Test with tox
+ run: tox
+
+ conformance_tests:
+ name: CWL spec conformance tests
+
+ runs-on: ubuntu-20.04
+
+ strategy:
+ matrix:
+ cwl-version: [v1.0, v1.1, v1.2]
+ container: [docker, singularity]
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Set up Singularity
+ uses: eWaterCycle/setup-singularity@v6
+ with:
+ singularity-version: ${{ env.singularity_version }}
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.9
+
+ - name: Cache for pip
+ uses: actions/cache@v2
+ with:
+ path: ~/.cache/pip
+ key: ${{ runner.os }}-pip-conformance-${{ hashFiles('requirements.txt') }}
+
+ - name: Run CWL conformance tests ${{ matrix.cwl-version }}
+ env:
+ version: ${{ matrix.cwl-version }}
+ container: ${{ matrix.container }}
+ spec_branch: main
+ run: ./conformance-test.sh
+
+ release_test:
+ name: cwltool release test
+
+ runs-on: ubuntu-20.04
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Set up Singularity
+ uses: eWaterCycle/setup-singularity@v6
+ with:
+ singularity-version: ${{ env.singularity_version }}
+
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.9
+
+ - name: Give the test runner user a name to make provenance happy.
+ run: sudo usermod -c 'CI Runner' $(whoami)
+
+ - name: Cache for pip
+ uses: actions/cache@v2
+ with:
+ path: ~/.cache/pip
+ key: ${{ runner.os }}-pip-release-${{ hashFiles('requirements.txt', 'test-requirements.txt') }}
+
+ - name: Install packages
+ run: |
+ pip install -U pip setuptools wheel
+ pip install virtualenv
+
+ - name: Release test
+ env:
+ RELEASE_SKIP: head
+ run: ./release-test.sh
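Each matrix cell above computes a plain tox environment name via ``TOXENV``, so a single CI cell can be reproduced locally; a minimal sketch, assuming tox and a matching Python interpreter are installed:

.. code:: bash

   pip install tox tox-gh-actions
   TOXENV=py39-unit tox   # the same name the workflow builds via format('py{0}{1}-{2}', ...)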
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 87598c18b0..bc4d680278 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -2,7 +2,9 @@ name: "Code scanning - action"
on:
push:
+ branches: [main]
pull_request:
+ branches: [main]
schedule:
- cron: '0 10 * * 2'
@@ -11,19 +13,14 @@ jobs:
runs-on: ubuntu-latest
+ permissions:
+ # required for all workflows
+ security-events: write
+
steps:
- name: Checkout repository
uses: actions/checkout@v2
- with:
- # We must fetch at least the immediate parents so that if this is
- # a pull request then we can checkout the head.
- fetch-depth: 2
- # If this run was triggered by a pull request event, then checkout
- # the head of the pull request instead of the merge commit.
- - run: git checkout HEAD^2
- if: ${{ github.event_name == 'pull_request' }}
-
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
diff --git a/.github/workflows/quay-publish.yml b/.github/workflows/quay-publish.yml
new file mode 100644
index 0000000000..fd9fd1ba8c
--- /dev/null
+++ b/.github/workflows/quay-publish.yml
@@ -0,0 +1,40 @@
+name: publish-quay
+on:
+ push:
+ tags:
+ - '*'
+ workflow_dispatch: {}
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@master
+ - name: Get image tags
+ id: image_tags
+ run: |
+ echo -n ::set-output name=IMAGE_TAGS::${GITHUB_REF#refs/*/}
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v1
+ - name: Login to Quay.io
+ uses: docker/login-action@v1
+ with:
+ registry: ${{ secrets.REGISTRY_SERVER }}
+ username: ${{ secrets.REGISTRY_USERNAME }}
+ password: ${{ secrets.REGISTRY_PASSWORD }}
+ - name: Build and publish cwltool_module image to Quay
+ uses: docker/build-push-action@v2
+ with:
+ file: cwltool.Dockerfile
+ tags: quay.io/commonwl/cwltool_module:${{ steps.image_tags.outputs.IMAGE_TAGS }}
+ target: module
+ push: true
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+ - name: Build and publish cwltool image to Quay
+ uses: docker/build-push-action@v2
+ with:
+ file: cwltool.Dockerfile
+ tags: quay.io/commonwl/cwltool:${{ steps.image_tags.outputs.IMAGE_TAGS }}
+ push: true
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
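The ``Get image tags`` step relies on shell parameter expansion to turn the pushed ref into an image tag; a quick illustration with a hypothetical tag name:

.. code:: bash

   GITHUB_REF=refs/tags/3.1.20210816
   echo "${GITHUB_REF#refs/*/}"   # strips "refs/tags/", printing: 3.1.20210816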
diff --git a/.mergify.yml b/.mergify.yml
index f2a542e6d0..85cbc7d268 100644
--- a/.mergify.yml
+++ b/.mergify.yml
@@ -2,6 +2,7 @@ pull_request_rules:
- name: Automatic merge on approval and when GitHub branch protection passes on main
conditions:
- "#approved-reviews-by>=1"
+ - -draft
- base=main
actions:
merge:
@@ -9,11 +10,14 @@ pull_request_rules:
strict: smart+fasttrack
pull_request_rules:
- - name: Automatic merge for leadership team members when there are no reviewers
+ - name: Automatic merge for leadership team members when there are no reviewers and the label is "ready"
conditions:
- "#review-requested=0"
+ - "#changes-requested-reviews-by<1"
+ - -draft
- base=main
- author=@leadership
+ - label=ready
actions:
merge:
method: merge
diff --git a/.snyk b/.snyk
new file mode 100644
index 0000000000..2da43bac40
--- /dev/null
+++ b/.snyk
@@ -0,0 +1,10 @@
+# Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities.
+version: v1.19.0
+# ignores vulnerabilities until expiry date; change duration by modifying expiry date
+ignore:
+ SNYK-PYTHON-NETWORKX-1062709:
+ - '*':
+ reason: Prov does not use the affected code path
+ expires: 2022-08-08T15:27:21.289Z
+ created: 2021-07-09T15:27:21.300Z
+patch: {}
diff --git a/.travis.singularity_key.txt b/.travis.singularity_key.txt
deleted file mode 100644
index b9d4dcaebc..0000000000
--- a/.travis.singularity_key.txt
+++ /dev/null
@@ -1,70 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQGiBEQ7TOgRBADvaRsIZ3VZ6Qy7PlDpdMm97m0OfvouOj/HhjOM4M3ECbGn4cYh
-vN1gK586s3sUsUcNQ8LuWvNsYhxYsVTZymCReJMEDxod0U6/z/oIbpWv5svF3kpl
-ogA66Ju/6cZx62RiCSOkskI6A3Waj6xHyEo8AGOPfzbMoOOQ1TS1u9s2FwCgxziL
-wADvKYlDZnWM03QtqIJVD8UEAOks9Q2OqFoqKarj6xTRdOYIBVEp2jhozZUZmLmz
-pKL9E4NKGfixqxdVimFcRUGM5h7R2w7ORqXjCzpiPmgdv3jJLWDnmHLmMYRYQc8p
-5nqo8mxuO3zJugxBemWoacBDd1MJaH7nK20Hsk9L/jvU/qLxPJotMStTnwO+EpsK
-HlihA/9ZpvzR1QWNUd9nSuNR3byJhaXvxqQltsM7tLqAT4qAOJIcMjxr+qESdEbx
-NHM5M1Y21ZynrsQw+Fb1WHXNbP79vzOxHoZR0+OXe8uUpkri2d9iOocre3NUdpOO
-JHtl6cGGTFILt8tSuOVxMT/+nlo038JQB2jARe4B85O0tkPIPbQybmV1cm8uZGVi
-aWFuLm5ldCBhcmNoaXZlIDxtaWNoYWVsLmhhbmtlQGdtYWlsLmNvbT6IRgQQEQgA
-BgUCTVHJKwAKCRCNEUVjdcAkyOvzAJ0abJz+f2a6VZG1c9T8NHMTYh1atwCgt0EE
-3ZZd/2in64jSzu0miqhXbOKISgQQEQIACgUCSotRlwMFAXgACgkQ93+NsjFEvg8n
-JgCfWcdJbILBtpLZCocvOzlLPqJ0Fn0AoI4EpJRxoUnrtzBGUC1MqecU7WsDiGAE
-ExECACAFAkqLUWcCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCl0y8BJkml
-qVklAJ4h2V6MdQkSAThF5c2Gkq6eSoIQYQCeM0DWyB9Bl+tTPSTYXwwZi2uoif20
-QmFwc3kuZ3NlLnVuaS1tYWdkZWJ1cmcuZGUgRGViaWFuIEFyY2hpdmUgPG1pY2hh
-ZWwuaGFua2VAZ21haWwuY29tPohGBBARAgAGBQJEO03FAAoJEPd/jbIxRL4PU18A
-n3tn7i4qdlMi8kHbYWFoabsKc9beAJ9sl/leZNCYNMGhz+u6BQgyeLKw94heBBMR
-AgAeBQJEO0zoAhsDBgsJCAcDAgMVAgMDFgIBAh4BAheAAAoJEKXTLwEmSaWpVdoA
-n27DvtZizNEbhz3wRUPQMiQjtqdvAJ9rS9YdPe5h5o5gHx3mw3BSkOttdYheBBMR
-AgAeBQJEO0zoAhsDBgsJCAcDAgMVAgMDFgIBAh4BAheAAAoJEKXTLwEmSaWpVdoA
-oLhwWL+E+2I9lrUf4Lf26quOK9vLAKC9ZpIF2tUirFFkBWnQvu13/TA0SokCHAQQ
-AQIABgUCTSNBgQAKCRDAc9Iof/uem4NpEACQ8jxmaCaS/qk/Y4GiwLA5bvKosG3B
-iARZ2v5UWqCZQ1tS56yKse/lCIzXQqU9BnYW6wOI2rvFf9meLfd8h96peG6oKscs
-fbclLDIf68bBvGBQaD0VYFi/Fk/rxmTQBOCQ3AJZs8O5rIM4gPGE0QGvSZ1h7VRw
-3Uyeg4jKXLIeJn2xEmOJgt3auAR2FyKbzHaX9JCoByJZ/eU23akNl9hgt7ePlpXo
-74KNYC58auuMUhCq3BQDB+II4ERYMcmFp1N5ZG05Cl6jcaRRHDXz+Ax6DWprRI1+
-RH/Yyae6LmKpeJNwd+vM14aawnNO9h8IAQ+aJ3oYZdRhGyybbin3giJ10hmWveg/
-Pey91Nh9vBCHdDkdPU0s9zE7z/PHT0c5ccZRukxfZfkrlWQ5iqu3V064ku5f4PBy
-8UPSkETcjYgDnrdnwqIAO+oVg/SFlfsOzftnwUrvwIcZlXAgtP6MEEAs/38e/JIN
-g4VrpdAy7HMGEUsh6Ah6lvGQr+zBnG44XwKfl7e0uCYkrAzUJRGM5vx9iXvFMcMu
-jv9EBNNBOU8/Y6MBDzGZhgaoeI27nrUvaveJXjAiDKAQWBLjtQjINZ8I9uaSGOul
-8kpbFavE4eS3+KhISrSHe4DuAa3dk9zI+FiPvXY1ZyfQBtNpR+gYFY6VxMbHhY1U
-lSLHO2eUIQLdYbRITmV1cm9EZWJpYW4gQXJjaGl2ZSBLZXkgPHBrZy1leHBwc3kt
-bWFpbnRhaW5lcnNAbGlzdHMuYWxpb3RoLmRlYmlhbi5vcmc+iEYEEBEIAAYFAk1R
-yQYACgkQjRFFY3XAJMgEWwCggx4Gqlcrt76TSMlbU94cESo55AEAoJ3asQEMpe8t
-QUX+5aikw3z1AUoCiEoEEBECAAoFAkqf/3cDBQF4AAoJEPd/jbIxRL4PxyMAoKUI
-RPWlHCj/+HSFfwhos68wcSwmAKChuC00qutDro+AOo+uuq6YoHXj+ohgBBMRAgAg
-BQJKn/8bAhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AACgkQpdMvASZJpalDggCe
-KF9KOgOPdQbFnKXl8KtHory4EEwAnA7jxgorE6kk2QHEXFSF8LzOOH4GiGMEExEC
-ACMCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAUCSp//RgIZAQAKCRCl0y8BJkml
-qekFAKCRyt4+FoCzmBbRUUP3Cr8PzH++IgCgkno4vdjsWdyAey8e0KpITTXMFrmJ
-AhwEEAECAAYFAk0jQYEACgkQwHPSKH/7npsFfw/+P8B8hpM3+T1fgboBa4R32deu
-n8m6b8vZMXwuo/awQtMpzjem8JGXSUQm8iiX4hDtjq6ZoPrlN8T4jNmviBt/F5jI
-Jji/PYmhq+Zn9s++mfx+aF4IJrcHJWFkg/6kJzn4oSdl/YlvKf4VRCcQNtj4xV87
-GsdamnzU17XapLVMbSaVKh+6Af7ZLDerEH+iAq733HsYaTK+1xKmN7EFVXgS7bZ1
-9C4LTzc97bVHSywpT9yIrg9QQs/1kshfVIHDKyhjF6IwzSVbeGAIL3Oqo5zOMkWv
-7JlEIkkhTyl+FETxNMTMYjAk+Uei3kRodneq3YBF2uFYSEzrXQgHAyn37geiaMYj
-h8wu6a85nG1NS0SdxiZDIePmbvD9vWxFZUWYJ/h9ifsLivWcVXlvHoQ0emd+n2ai
-FhAck2xsuyHgnGIZMHww5IkQdu/TMqvbcR6d8Xulh+C4Tq7ppy+oTLADSBKII++p
-JQioYydRD529EUJgVlhyH27X6YAk3FuRD3zYZRYS2QECiKXvS665o3JRJ0ZSqNgv
-YOom8M0zz6bI9grnUoivMI4o7ISpE4ZwffEd37HVzmraaUHDXRhkulFSf1ImtXoj
-V9nNSM5p/+9eP7OioTZhSote6Vj6Ja1SZeRkXZK7BwqPbdO0VsYOb7G//ZiOlqs+
-paRr92G/pwBfj5Dq8EK5Ag0ERDtM9RAIAN0EJqBPvLN0tEin/y4Fe0R4n+E+zNXg
-bBsq4WidwyUFy3h/6u86FYvegXwUqVS2OsEs5MwPcCVJOfaEthF7I89QJnP9Nfx7
-V5I9yFB53o9ii38BN7X+9gSjpfwXOvf/wIDfggxX8/wRFel37GRB7TiiABRArBez
-s5x+zTXvT++WPhElySj0uY8bjVR6tso+d65K0UesvAa7PPWeRS+3nhqABSFLuTTT
-MMbnVXCGesBrYHlFVXClAYrSIOX8Ub/UnuEYs9+hIV7U4jKzRF9WJhIC1cXHPmOh
-vleAf/I9h/0KahD7HLYud40pNBo5tW8jSfp2/Q8TIE0xxshd51/xy4MAAwUH+wWn
-zsYVk981OKUEXul8JPyPxbw05fOd6gF4MJ3YodO+6dfoyIl3bewk+11KXZQALKaO
-1xmkAEO1RqizPeetoadBVkQBp5xPudsVElUTOX0pTYhkUd3iBilsCYKK1/KQ9KzD
-I+O/lRsm6L9lc6rV0IgPU00P4BAwR+x8Rw7TJFbuS0miR3lP1NSguz+/kpjxzmGP
-LyHJ+LVDYFkk6t0jPXhqFdUY6McUTBDEvavTGlVO062l9APTmmSMVFDsPN/rBes2
-rYhuuT+lDp+gcaS1UoaYCIm9kKOteQBnowX9V74Z+HKEYLtwILaSnNe6/fNSTvyj
-g0z+R+sPCY4nHewbVC+ISQQYEQIACQUCRDtM9QIbDAAKCRCl0y8BJkmlqbecAJ9B
-UdSKVg9H+fQNyP5sbOjj4RDtdACfXHrRHa2+XjJP0dhpvJ8IfvYnQsU=
-=fAJZ
------END PGP PUBLIC KEY BLOCK-----
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 838266f5ce..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-dist: xenial
-services:
- - docker
-
-before_install:
-- wget -O- http://neuro.debian.net/lists/xenial.us-nh.full | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list
-- sudo apt-key add .travis.singularity_key.txt
-- sudo apt-get update
-- sudo apt-get install -y singularity-container
-
-language: python
-cache:
- pip: true
- directories:
- - .mypy_cache
-
-os:
- - linux
-install:
- - pip install tox-travis
-jobs:
- include:
- - python: "3.6"
- - python: "3.7"
- - python: "3.8"
- - python: "3.9-dev"
- - python: "3.8"
- name: "CWL v1.0 conformance tests"
- script: ${TRAVIS_BUILD_DIR}/travis.bash
- env:
- - version=v1.0
- - python: "3.8"
- name: "CWL v1.1 conformance tests"
- env:
- - version=v1.1
- script: ${TRAVIS_BUILD_DIR}/travis.bash
- - python: "3.7"
- script: RELEASE_SKIP=head ${TRAVIS_BUILD_DIR}/release-test.sh
- name: 'release test'
- - python: "3.8"
- name: "CWL v1.2 conformance tests"
- env:
- - version=v1.2.0-dev4
- script: ${TRAVIS_BUILD_DIR}/travis.bash
-script: tox
-branches:
- only:
- - main
-notifications:
- email: false
diff --git a/Jenkinsfile b/Jenkinsfile
deleted file mode 100644
index eba57b290e..0000000000
--- a/Jenkinsfile
+++ /dev/null
@@ -1,29 +0,0 @@
-pipeline {
- agent {
- node {
- label 'windows'
- }
-
- }
- options {
- timeout(30)
- }
- stages {
- stage('build') {
- steps {
- withPythonEnv(pythonInstallation: 'Windows-CPython-36') {
- pybat(script: 'pip install .', returnStdout: true)
- pybat 'jenkins.bat'
- }
-
- }
- }
- }
- post {
- always {
- junit 'tests.xml'
-
- }
-
- }
-}
diff --git a/MANIFEST.in b/MANIFEST.in
index bd42a86640..336c7f4c78 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,6 +7,7 @@ include tests/*
include tests/tmp1/tmp2/tmp3/.gitkeep
include tests/tmp4/alpha/*
include tests/wf/*
+include tests/wf/operation/*
include tests/override/*
include tests/checker_wf/*
include tests/subgraph/*
diff --git a/Makefile b/Makefile
index 8f985a6293..dbe02adebc 100644
--- a/Makefile
+++ b/Makefile
@@ -25,18 +25,19 @@ MODULE=cwltool
# `SHELL=bash` doesn't work for some, so don't use BASH-isms like
# `[[` conditional expressions.
PYSOURCES=$(wildcard ${MODULE}/**.py tests/*.py) setup.py
-DEVPKGS=diff_cover black pylint coverage pep257 pydocstyle flake8 mypy\
- pytest-xdist isort wheel autoflake -rtest-requirements.txt
+DEVPKGS=diff_cover black pylint pep257 pydocstyle flake8 tox tox-pyenv \
+ isort wheel autoflake flake8-bugbear pyupgrade bandit \
+ -rtest-requirements.txt -rmypy_requirements.txt
DEBDEVPKGS=pep8 python-autopep8 pylint python-coverage pydocstyle sloccount \
python-flake8 python-mock shellcheck
-VERSION=3.0.$(shell TZ=UTC git log --first-parent --max-count=1 \
+
+VERSION=3.1.$(shell TZ=UTC git log --first-parent --max-count=1 \
--format=format:%cd --date=format-local:%Y%m%d%H%M%S)
mkfile_dir := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
UNAME_S=$(shell uname -s)
## all : default task
-all:
- pip install -e .
+all: dev
## help : print this help message and exit
help: Makefile
@@ -45,7 +46,7 @@ help: Makefile
## install-dep : install most of the development dependencies via pip
install-dep: install-dependencies
-install-dependencies:
+install-dependencies: FORCE
pip install --upgrade $(DEVPKGS)
pip install -r requirements.txt
@@ -65,24 +66,29 @@ dev: install-dep
## dist : create a module package for distribution
dist: dist/${MODULE}-$(VERSION).tar.gz
-dist/${MODULE}-$(VERSION).tar.gz: $(SOURCES)
- ./setup.py sdist bdist_wheel
+check-python3:
+# Check that the default python version is python 3
+ python --version 2>&1 | grep "Python 3"
+
+dist/${MODULE}-$(VERSION).tar.gz: check-python3 $(SOURCES)
+ python setup.py sdist bdist_wheel
## docs : make the docs
docs: FORCE
cd docs && $(MAKE) html
## clean : clean up all temporary / machine-generated files
-clean: FORCE
- rm -f ${MODILE}/*.pyc tests/*.pyc
- ./setup.py clean --all || true
+clean: check-python3 FORCE
+ rm -f ${MODULE}/*.pyc tests/*.pyc *.so ${MODULE}/*.so
+ rm -Rf ${MODULE}/__pycache__/
+ python setup.py clean --all || true
rm -Rf .coverage
rm -f diff-cover.html
# Linting and code style related targets
## sorting imports using isort: https://github.com/timothycrosley/isort
-sort_imports:
- isort ${MODULE}/*.py tests/*.py setup.py
+sort_imports: $(PYSOURCES)
+ isort $^
remove_unused_imports: $(PYSOURCES)
autoflake --in-place --remove-all-unused-imports $^
@@ -102,17 +108,20 @@ diff_pydocstyle_report: pydocstyle_report.txt
format:
black --exclude cwltool/schemas setup.py cwltool.py cwltool tests
+format-check:
+ black --diff --check --exclude cwltool/schemas setup.py cwltool.py cwltool tests
+
## pylint : run static code analysis on Python code
pylint: $(PYSOURCES)
pylint --msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" \
$^ -j0|| true
-pylint_report.txt: ${PYSOURCES}
+pylint_report.txt: $(PYSOURCES)
pylint --msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" \
$^ -j0> $@ || true
diff_pylint_report: pylint_report.txt
- diff-quality --violations=pylint pylint_report.txt
+ diff-quality --compare-branch=main --violations=pylint pylint_report.txt
.coverage: testcov
@@ -132,24 +141,24 @@ coverage-report: .coverage
coverage report
diff-cover: coverage.xml
- diff-cover $^
+ diff-cover --compare-branch=main $^
diff-cover.html: coverage.xml
- diff-cover $^ --html-report $@
+ diff-cover --compare-branch=main $^ --html-report $@
## test : run the ${MODULE} test suite
-test: $(pysources)
- python setup.py test --addopts "-n auto --dist=loadfile"
+test: check-python3 $(PYSOURCES)
+ python -m pytest ${PYTEST_EXTRA}
## testcov : run the ${MODULE} test suite and collect coverage
-testcov: $(pysources)
- python setup.py test --addopts "--cov cwltool -n auto --dist=loadfile"
+testcov: check-python3 $(PYSOURCES)
+ python -m pytest --cov --cov-config=.coveragerc --cov-report= ${PYTEST_EXTRA}
-sloccount.sc: ${PYSOURCES} Makefile
+sloccount.sc: $(PYSOURCES) Makefile
sloccount --duplicates --wide --details $^ > $@
## sloccount : count lines of code
-sloccount: ${PYSOURCES} Makefile
+sloccount: $(PYSOURCES) Makefile
sloccount $^
list-author-emails:
@@ -157,35 +166,40 @@ list-author-emails:
@git log --format='%aN,%aE' | sort -u | grep -v 'root'
mypy3: mypy
-mypy: $(filter-out setup.py gittagger.py,${PYSOURCES})
- if ! test -f $(shell python3 -c 'import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))')/py.typed ; \
+mypy: $(filter-out setup.py gittagger.py,$(PYSOURCES))
+ if ! test -f $(shell python -c 'import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))')/py.typed ; \
then \
- rm -Rf typeshed/2and3/ruamel/yaml ; \
- ln -s $(shell python3 -c 'import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
- typeshed/2and3/ruamel/ ; \
+ rm -Rf typeshed/ruamel/yaml ; \
+ ln -s $(shell python -c 'import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
+ typeshed/ruamel/ ; \
fi # if minimally required ruamel.yaml version is 0.15.99 or greater, then the above can be removed
- MYPYPATH=$$MYPYPATH:typeshed/3:typeshed/2and3 mypy --disallow-untyped-calls \
- --warn-redundant-casts \
- $^
+ MYPYPATH=$$MYPYPATH:typeshed mypy $^
-mypyc: ${PYSOURCES}
- MYPYPATH=typeshed/2and3/:typeshed/3 CWLTOOL_USE_MYPYC=1 pip install --verbose -e . && pytest --ignore cwltool/schemas --basetemp ./tmp
+mypyc: $(PYSOURCES)
+ MYPYPATH=typeshed CWLTOOL_USE_MYPYC=1 pip install --verbose -e . \
+ && pytest -vv ${PYTEST_EXTRA}
shellcheck: FORCE
- shellcheck build-cwl-docker.sh cwl-docker.sh release-test.sh travis.bash \
+ shellcheck build-cwltool-docker.sh cwl-docker.sh release-test.sh conformance-test.sh \
cwltool-in-docker.sh
-release-test: FORCE
+pyupgrade: $(PYSOURCES)
+ pyupgrade --exit-zero-even-if-changed --py36-plus $^
+
+release-test: check-python3 FORCE
git diff-index --quiet HEAD -- || ( echo You have uncommitted changes, please commit them and try again; false )
./release-test.sh
release: release-test
. testenv2/bin/activate && \
- testenv2/src/${MODULE}/setup.py sdist bdist_wheel && \
+ python testenv2/src/${MODULE}/setup.py sdist bdist_wheel && \
pip install twine && \
twine upload testenv2/src/${MODULE}/dist/* && \
git tag ${VERSION} && git push --tags
+flake8: $(PYSOURCES)
+ flake8 $^
+
FORCE:
# Use this to print the value of a Makefile variable
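For reference, the bumped ``VERSION`` macro stamps builds with the date of the newest first-parent commit; a sketch of what its embedded shell command evaluates, assuming a git checkout:

.. code:: bash

   TZ=UTC git log --first-parent --max-count=1 \
       --format=format:%cd --date=format-local:%Y%m%d%H%M%S
   # prints e.g. 20210816094500, yielding VERSION=3.1.20210816094500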
diff --git a/README.rst b/README.rst
index abd0397580..9f7a9ecb7a 100644
--- a/README.rst
+++ b/README.rst
@@ -2,26 +2,52 @@
Common Workflow Language tool description reference implementation
==================================================================
-|Linux Status| |Windows Status| |Coverage Status| |Downloads|
+|Linux Status| |Coverage Status|
-.. |Linux Status| image:: https://img.shields.io/travis/common-workflow-language/cwltool/main.svg?label=Linux%20builds
- :target: https://travis-ci.org/common-workflow-language/cwltool
+PyPI: |PyPI Version| |PyPI Downloads Month| |Total PyPI Downloads|
-.. |Windows Status| image:: https://img.shields.io/appveyor/ci/mr-c/cwltool/main.svg?label=Windows%20builds
- :target: https://ci.appveyor.com/project/mr-c/cwltool
+Conda: |Conda Version| |Conda Installs|
+
+Debian: |Debian Testing package| |Debian Stable package|
+
+Quay.io (Docker): |Quay.io Container|
+
+.. |Linux Status| image:: https://github.com/common-workflow-language/cwltool/actions/workflows/ci-tests.yml/badge.svg?branch=main
+ :target: https://github.com/common-workflow-language/cwltool/actions/workflows/ci-tests.yml
+
+.. |Debian Stable package| image:: https://badges.debian.net/badges/debian/stable/cwltool/version.svg
+ :target: https://packages.debian.org/stable/cwltool
+
+.. |Debian Testing package| image:: https://badges.debian.net/badges/debian/testing/cwltool/version.svg
+ :target: https://packages.debian.org/testing/cwltool
.. |Coverage Status| image:: https://img.shields.io/codecov/c/github/common-workflow-language/cwltool.svg
:target: https://codecov.io/gh/common-workflow-language/cwltool
-.. |Downloads| image:: https://pepy.tech/badge/cwltool/month
+.. |PyPI Version| image:: https://badge.fury.io/py/cwltool.svg
+ :target: https://badge.fury.io/py/cwltool
+
+.. |PyPI Downloads Month| image:: https://pepy.tech/badge/cwltool/month
:target: https://pepy.tech/project/cwltool
+.. |Total PyPI Downloads| image:: https://static.pepy.tech/personalized-badge/cwltool?period=total&units=international_system&left_color=black&right_color=orange&left_text=Total%20PyPI%20Downloads
+ :target: https://pepy.tech/project/cwltool
+
+.. |Conda Version| image:: https://anaconda.org/conda-forge/cwltool/badges/version.svg
+ :target: https://anaconda.org/conda-forge/cwltool
+
+.. |Conda Installs| image:: https://anaconda.org/conda-forge/cwltool/badges/downloads.svg
+ :target: https://anaconda.org/conda-forge/cwltool
+
+.. |Quay.io Container| image:: https://quay.io/repository/commonwl/cwltool/status
+ :target: https://quay.io/repository/commonwl/cwltool
+
This is the reference implementation of the Common Workflow Language. It is
intended to be feature complete and provide comprehensive validation of CWL
files as well as provide other tools related to working with CWL.
This is written and tested for
-`Python <https://www.python.org/>`_ ``3.x {x = 6, 7, 8}``
+`Python <https://www.python.org/>`_ ``3.x {x = 6, 7, 8, 9}``
The reference implementation consists of two packages. The ``cwltool`` package
is the primary Python module containing the reference implementation in the
@@ -31,61 +57,81 @@ The ``cwlref-runner`` package is optional and provides an additional entry point
under the alias ``cwl-runner``, which is the implementation-agnostic name for the
default CWL interpreter installed on a host.
-``cwltool`` is provided by the CWL project, `a member project of Software Freedom Conservancy <https://sfconservancy.org/>`_ and our `many contributors <https://github.com/common-workflow-language/cwltool/graphs/contributors>`_.
+``cwltool`` is provided by the CWL project, `a member project of Software Freedom Conservancy <https://sfconservancy.org/>`_
+and our `many contributors <https://github.com/common-workflow-language/cwltool/graphs/contributors>`_.
Install
-------
-Your operating system may offer cwltool directly. For `Debian `_ or `Ubuntu `_ try
+``cwltool`` packages
+^^^^^^^^^^^^^^^^^^^^
+
+Your operating system may offer cwltool directly. For `Debian <https://www.debian.org/>`_, `Ubuntu <https://ubuntu.com/>`_,
+and similar Linux distributions, try
.. code:: bash
- apt-get install cwltool
+ sudo apt-get install cwltool
-For MacOS X, other UNIXes or Windows packages prepared by the conda-forge project. Please follow instructions of conda-forge (https://conda-forge.org/#about) for its installation, then perform:
+If you are running macOS or other UNIXes and you want to use packages prepared by the conda-forge project, then
+please follow the install instructions for `conda-forge <https://conda-forge.org/#about>`_ (if you haven't already) and then
.. code:: bash
conda install -c conda-forge cwltool
-
-Under the hood, conda setups virtual environments before installing `cwltool` to
-avoid conflicting versions of the same library. When installing cwltool directly,
-it is recommended to do the same manually:
+
+All of the above methods of installing ``cwltool`` use packages that might contain bugs already fixed in newer versions or be missing desired features.
+If the packaged version of ``cwltool`` available to you is too old, then we recommend installing using ``pip`` and ``venv``
.. code:: bash
- virtualenv -p python3 venv # Create a virtual environment
- source venv/bin/activate # Activate environment before installing `cwltool`
+ python3 -m venv env # Create a virtual environment named 'env' in the current directory
+ source env/bin/activate # Activate environment before installing `cwltool`
-Installing the official package from PyPi (will install "cwltool" package as
+Then install the latest ``cwlref-runner`` package from PyPI (which will install the latest ``cwltool`` package as
well)
.. code:: bash
pip install cwlref-runner
-If installing alongside another CWL implementation then
+If installing alongside another CWL implementation (like ``toil-cwl-runner`` or ``arvados-cwl-runner``) then instead run
.. code:: bash
pip install cwltool
-Or you can install from source:
+MS Windows users
+^^^^^^^^^^^^^^^^
+
+1. Install `"Windows Subsystem for Linux 2" (WSL2) and Docker Desktop `_
+2. Install `Debian from the Microsoft Store `_
+3. Set Debian as your default WSL 2 distro: ``wsl --set-default debian``
+4. Reboot if you have not yet already.
+5. Launch Debian and follow the Linux instructions above (``apt-get install cwltool`` or use the ``venv`` method)
+
+``cwltool`` development version
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Or you can skip the direct ``pip`` commands above and install the latest development version of ``cwltool``:
.. code:: bash
- git clone https://github.com/common-workflow-language/cwltool.git # clone cwltool repo
- cd cwltool # Switch to source directory
- pip install . # Install `cwltool` from source
- cwltool --version # Check if the installation works correctly
+ git clone https://github.com/common-workflow-language/cwltool.git # clone (copy) the cwltool git repository
+ cd cwltool # Change to source directory that git clone just downloaded
+ pip install .[deps] # Installs ``cwltool`` from source
+ cwltool --version # Check if the installation works correctly
-Remember, if co-installing multiple CWL implementations then you need to
+Remember, if co-installing multiple CWL implementations, then you need to
maintain which implementation ``cwl-runner`` points to via a symbolic file
system link or `another facility `_.
+Recommended Software
+^^^^^^^^^^^^^^^^^^^^
+
You may also want to have the following installed:
-node.js
-Docker, udocker, or Singularity (optional)
+- `node.js `_
+- Docker, udocker, or Singularity (optional)
Without these, some examples in the CWL tutorials at http://www.commonwl.org/user_guide/ may not work.
@@ -97,7 +143,7 @@ Simple command::
cwl-runner [tool-or-workflow-description] [input-job-settings]
Or if you have multiple CWL implementations installed and you want to override
-the default cwl-runner use::
+the default cwl-runner then use::
cwltool [tool-or-workflow-description] [input-job-settings]
@@ -106,9 +152,9 @@ these will be inserted at the beginning of the command line::
export CWLTOOL_OPTIONS="--debug"
-Use with boot2docker
---------------------
-boot2docker runs Docker inside a virtual machine and it only mounts ``Users``
+Use with boot2docker on macOS
+-----------------------------
+boot2docker runs Docker inside a virtual machine, and it only mounts ``Users``
on it. The default behavior of CWL is to create temporary directories under e.g.
``/Var`` which is not accessible to Docker containers.
@@ -117,24 +163,24 @@ and ``--tmp-outdir-prefix`` to somewhere under ``/Users``::
$ cwl-runner --tmp-outdir-prefix=/Users/username/project --tmpdir-prefix=/Users/username/project wc-tool.cwl wc-job.json
-Using user-space replacements for Docker
-----------------------------------------
+Using uDocker
+-------------
Some shared computing environments don't support Docker software containers for technical or policy reasons.
-As a work around, the CWL reference runner supports using alternative ``docker`` implementations on Linux
+As a workaround, the CWL reference runner supports using alternative ``docker`` implementations on Linux
with the ``--user-space-docker-cmd`` option.
-One such "user space" friendly docker replacement is ``udocker`` https://github.com/indigo-dc/udocker
+One such "user space" friendly docker replacement is ``udocker`` https://github.com/indigo-dc/udocker.
udocker installation: https://github.com/indigo-dc/udocker/blob/master/doc/installation_manual.md#22-install-from-udockertools-tarball
-Run `cwltool` just as you normally would, but with the new option, e.g. from the conformance tests:
+Run `cwltool` just as you usually would, but with the new option, e.g., from the conformance tests:
.. code:: bash
- cwltool --user-space-docker-cmd=udocker https://raw.githubusercontent.com/common-workflow-language/common-workflow-language/main/v1.0/v1.0/test-cwl-out2.cwl https://github.com/common-workflow-language/common-workflow-language/blob/main/v1.0/v1.0/empty.json
+ cwltool --user-space-docker-cmd=udocker https://raw.githubusercontent.com/common-workflow-language/common-workflow-language/main/v1.0/v1.0/test-cwl-out2.cwl https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/empty.json
-``cwltool`` can use `Singularity <https://sylabs.io/singularity/>`_ version 2.6.1
+``cwltool`` can also use `Singularity <https://sylabs.io/singularity/>`_ version 2.6.1
or later as a Docker container runtime.
``cwltool`` with Singularity will run software containers specified in
``DockerRequirement`` and therefore works with Docker images only, native
@@ -143,6 +189,7 @@ runtime, provide ``--singularity`` command line option to ``cwltool``.
With Singularity, ``cwltool`` can pass all CWL v1.0 conformance tests, except
those involving Docker container ENTRYPOINTs.
+Example
.. code:: bash
@@ -155,8 +202,8 @@ Running a tool or workflow from remote or local locations
systems via its support for HTTP[S] URLs.
Input job files and Workflow steps (via the `run` directive) can reference CWL
-documents using absolute or relative local filesytem paths. If a relative path
-is referenced and that document isn't found in the current directory then the
+documents using absolute or relative local filesystem paths. If a relative path
+is referenced and that document isn't found in the current directory, then the
following locations will be searched:
http://www.commonwl.org/v1.0/CommandLineTool.html#Discovering_CWL_documents_on_a_local_filesystem
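For instance, both the tool description and the job file can be fetched over HTTPS; a sketch reusing the conformance-test documents referenced later in this README:

.. code:: bash

   cwltool \
       https://raw.githubusercontent.com/common-workflow-language/common-workflow-language/main/v1.0/v1.0/test-cwl-out2.cwl \
       https://github.com/common-workflow-language/common-workflow-language/raw/main/v1.0/v1.0/empty.json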
@@ -185,7 +232,7 @@ workflow step, or command line tool) to the process requirements that should be
Overrides can be specified either on the command line, or as part of the job
input document. Workflow steps are identified using the name of the workflow
file followed by the step name as a document fragment identifier "#id".
-Override identifiers are relative to the toplevel workflow document.
+Override identifiers are relative to the top-level workflow document.
.. code:: bash
@@ -216,7 +263,7 @@ referenced by a workflow and builds a new CWL document with all
Process objects (CommandLineTool and Workflow) in a list in the
``$graph`` field. Cross references (such as ``run:`` and ``source:``
fields) are updated to internal references within the new packed
-document. The top level workflow is named ``#main``.
+document. The top-level workflow is named ``#main``.
.. code:: bash
@@ -228,7 +275,7 @@ Running only part of a workflow
You can run a partial workflow with the ``--target`` (``-t``) option. This
takes the name of an output parameter, workflow step, or input
-parameter in the top level workflow. You may provide multiple
+parameter in the top-level workflow. You may provide multiple
targets.
.. code:: bash
@@ -238,11 +285,11 @@ targets.
If a target is an output parameter, it will only run the steps
that contribute to that output. If a target is a workflow step, it
will run the workflow starting from that step. If a target is an
-input parameter, it will only run only the steps that are connected to
+input parameter, it will only run the steps connected to
that input.
Use ``--print-targets`` to get a listing of the targets of a workflow.
-To see exactly which steps will run, use ``--print-subgraph`` with
+To see which steps will run, use ``--print-subgraph`` with
``--target`` to get a printout of the workflow subgraph for the
selected targets.
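For example, a hypothetical session combining these flags (the workflow and step names are made up):

.. code:: bash

   cwltool --print-targets my-wf.cwl                    # list the available targets
   cwltool --target step_a --print-subgraph my-wf.cwl   # preview the steps that would run
   cwltool --target step_a my-wf.cwl my-job.json        # run only that portion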
@@ -272,6 +319,30 @@ CWL documents can be expressed as RDF triple graphs.
cwltool --print-rdf --rdf-serializer=turtle mywf.cwl
+Environment Variables in cwltool
+--------------------------------
+
+This reference implementation supports several ways of setting
+environment variables for tools, in addition to the standard
+``EnvVarRequirement``. The sequence of steps applied to create the
+environment is (a usage sketch follows this list):
+
+0. If the ``--preserve-entire-environment`` flag is present, then begin with the current
+ environment, else begin with an empty environment.
+
+1. Add any variables specified by ``--preserve-environment`` option(s).
+
+2. Set ``TMPDIR`` and ``HOME`` per `the CWL v1.0+ CommandLineTool specification <https://www.commonwl.org/v1.0/CommandLineTool.html#Runtime_environment>`_.
+
+3. Apply any ``EnvVarRequirement`` from the ``CommandLineTool`` description.
+
+4. Apply any manipulations required by any ``cwltool:MPIRequirement`` extensions.
+
+5. Substitute any secrets required by ``Secrets`` extension.
+
+6. Modify the environment in response to ``SoftwareRequirement`` (see below).
+
+
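A hypothetical invocation combining these mechanisms (the variable name is made up; steps 2-6 still apply on top):

.. code:: bash

   # start from an empty environment, but pass the scheduler's node list through
   cwltool --preserve-environment SLURM_JOB_NODELIST my-tool.cwl my-job.json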
Leveraging SoftwareRequirements (Beta)
--------------------------------------
@@ -292,6 +363,14 @@ This option allows one to specify a dependency resolver's configuration file.
This file may be specified as either XML or YAML and very simply describes various
plugins to enable to "resolve" ``SoftwareRequirement`` dependencies.
+Using these hints will allow cwltool to modify the environment in
+which your tool runs, for example by loading one or more Environment
+Modules. The environment is constructed as above, then the environment
+may be modified by the selected tool resolver. This currently means that
+you cannot override any environment variables set by the selected tool
+resolver. Note that the environment given to the configured dependency
+resolver has the variable `_CWLTOOL` set to `1` to allow introspection.
+
To discuss some of these plugins and how to configure them, first consider the
following ``hint`` definition for an example CWL tool.
@@ -339,7 +418,7 @@ example does require an existing module setup so it is impossible to test this e
"out of the box" with cwltool. For a more isolated test that demonstrates all
the same concepts - the resolver plugin type ``galaxy_packages`` can be used.
-"Galaxy packages" are a lighter weight alternative to Environment Modules that are
+"Galaxy packages" are a lighter-weight alternative to Environment Modules that are
really just defined by a way to lay out directories into packages and versions
to find little scripts that are sourced to modify the environment. They have
been used for years in Galaxy community to adapt Galaxy tools to cluster
@@ -389,12 +468,12 @@ The resolvers configuration file in the above example was simply:
It is possible that the ``SoftwareRequirement`` s in a given CWL tool will not
match the module names for a given cluster. Such requirements can be re-mapped
-to specific deployed packages and/or versions using another file specified using
+to specific deployed packages or versions using another file specified using
the resolver plugin parameter `mapping_files`. We will
-demonstrate this using `galaxy_packages` but the concepts apply equally well
-to Environment Modules or Conda packages (described below) for instance.
+demonstrate this using `galaxy_packages`, but the concepts apply equally well
+to Environment Modules or Conda packages (described below), for instance.
-So consider the resolvers configuration file
+So consider the resolvers configuration file.
(`tests/test_deps_env_resolvers_conf_rewrite.yml`):
.. code:: yaml
@@ -403,7 +482,7 @@ So consider the resolvers configuration file
base_path: ./tests/test_deps_env
mapping_files: ./tests/test_deps_mapping.yml
-And the corresponding mapping configuraiton file (`tests/test_deps_mapping.yml`):
+And the corresponding mapping configuration file (`tests/test_deps_mapping.yml`):
.. code:: yaml
@@ -431,18 +510,17 @@ support for Homebrew/Linuxbrew plugins is available, the most developed such
plugin is for the `Conda `__ package manager. Conda has the nice properties
of allowing multiple versions of a package to be installed simultaneously,
not requiring evaluated permissions to install Conda itself or packages using
-Conda, and being cross platform. For these reasons, cwltool may run as a normal
+Conda, and being cross-platform. For these reasons, cwltool may run as a normal
user, install its own Conda environment and manage multiple versions of Conda packages
-on both Linux and Mac OS X.
+on Linux and Mac OS X.
The Conda plugin can be endlessly configured, but a sensible set of defaults
that has proven a powerful stack for dependency management within the Galaxy tool
development ecosystem can be enabled by simply passing cwltool the
``--beta-conda-dependencies`` flag.
-With this we can use the seqtk example above without Docker and without
-any externally managed services - cwltool should install everything it needs
-and create an environment for the tool. Try it out with the follwing command::
+With this, we can use the seqtk example above without Docker or any externally managed services - cwltool should install everything it needs
+and create an environment for the tool. Try it out with the following command::
cwltool --beta-conda-dependencies tests/seqtk_seq.cwl tests/seqtk_seq_job.json
@@ -469,7 +547,7 @@ The example can be executed using the command::
cwltool --beta-conda-dependencies tests/seqtk_seq_wrong_name.cwl tests/seqtk_seq_job.json
-The plugin framework for managing resolution of these software requirements
+The plugin framework for managing the resolution of these software requirements
is maintained as part of `galaxy-tool-util `__ - a small,
portable subset of the Galaxy project. More information on configuration and implementation can be found
at the following links:
@@ -529,9 +607,9 @@ in an integer). For example::
Interaction with containers: the MPIRequirement currently prepends its
commands to the front of the command line that is constructed. If you
-wish to run a containerised application in parallel, for simple use
-cases this does work with Singularity, depending upon the platform
-setup. However this combination should be considered "alpha" -- please
+wish to run a containerized application in parallel, for simple use
+cases, this does work with Singularity, depending upon the platform
+setup. However, this combination should be considered "alpha" -- please
do report any issues you have! This does not work with Docker at the
moment. (More precisely, you get `n` copies of the same single process
image run at the same time that cannot communicate with each other.)
@@ -559,8 +637,8 @@ given in the following table; all are optional.
| | | | variables that should be |
| | | | passed from the host |
| | | | environment through to the |
-| | | | tool (e.g. giving the |
-| | | | nodelist as set by your |
+| | | | tool (e.g., giving the |
+| | | | node list as set by your |
| | | | scheduler). |
+----------------+------------------+----------+------------------------------+
| env_pass_regex | List[str] | [] | A list of python regular |
@@ -591,19 +669,19 @@ To run the basic tests after installing `cwltool` execute the following:
.. code:: bash
pip install -rtest-requirements.txt
- py.test --ignore cwltool/schemas/ --pyarg cwltool
+ pytest ## N.B. This requires node.js or docker to be available
-To run various tests in all supported Python environments we use `tox `_. To run the test suite in all supported Python environments
-first downloading the complete code repository (see the ``git clone`` instructions above) and then run
+To run various tests in all supported Python environments, we use `tox <https://tox.readthedocs.io/en/latest/>`_. To run the test suite in all supported Python environments,
+first clone the complete code repository (see the ``git clone`` instructions above) and then run
the following in the terminal:
-``pip install tox; tox``
+``pip install tox; tox -p``
A list of all environments can be seen using:
``tox --listenvs``
and running a specific test env using:
``tox -e ``
and additionally run a specific test using this format:
-``tox -e py36-unit -- tests/test_examples.py::TestParamMatching``
+``tox -e py36-unit -- -v tests/test_examples.py::test_scandeps``
- Running the entire suite of CWL conformance tests:
@@ -611,7 +689,7 @@ The GitHub repository for the CWL specifications contains a script that tests a
implementation against a wide array of valid CWL files using the `cwltest <https://github.com/common-workflow-language/cwltest>`_
program
-Instructions for running these tests can be found in the Common Workflow Language Specification repository at https://github.com/common-workflow-language/common-workflow-language/blob/main/CONFORMANCE_TESTS.md
+Instructions for running these tests can be found in the Common Workflow Language Specification repository at https://github.com/common-workflow-language/common-workflow-language/blob/main/CONFORMANCE_TESTS.md .
Import as a module
------------------
@@ -647,7 +725,7 @@ Technical outline of how cwltool works internally, for maintainers.
#. Fetches the document from file or URL
#. Applies preprocessing (syntax/identifier expansion and normalization)
#. Validates the document based on cwlVersion
- #. If necessary, updates the document to latest spec
+ #. If necessary, updates the document to the latest spec
#. Constructs a Process object using ``make_tool()``` callback. This yields a
CommandLineTool, Workflow, or ExpressionTool. For workflows, this
recursively constructs each workflow step.
@@ -662,7 +740,7 @@ Technical outline of how cwltool works internally, for maintainers.
there is currently no work ready to run) or end of iteration (indicating
the process is complete.)
#. Invoke the runnable item by calling ``run()``. This runs the tool and gets output.
- #. Output of a process is reported by an output callback.
+ #. An output callback reports the output of a process.
#. ``job()`` may be iterated over multiple times. It will yield all the work
that is currently ready to run and then yield None.
@@ -672,12 +750,11 @@ Technical outline of how cwltool works internally, for maintainers.
inputs to the step are ready.
#. When a step is ready, it constructs an input object for that step and
iterates on the ``job()`` method of the workflow job step.
- #. Each runnable item is yielded back up to top level run loop
+ #. Each runnable item is yielded back up to the top-level run loop
#. When a step job completes and receives an output callback, the
job outputs are assigned to the output of the workflow step.
#. When all steps are complete, the intermediate files are moved to a final
- workflow output, intermediate directories are deleted, and the output
- callback for the workflow is called.
+ workflow output, intermediate directories are deleted, and the workflow's output callback is called.
#. ``CommandLineTool`` job() objects yield a single runnable object.
@@ -710,7 +787,7 @@ executor
executor(tool, job_order_object, runtimeContext, logger)
(Process, Dict[Text, Any], RuntimeContext) -> Tuple[Dict[Text, Any], Text]
- An implementation of the toplevel workflow execution loop, should
+ An implementation of the top-level workflow execution loop should
synchronously run a process object to completion and return the
output object.
@@ -747,7 +824,7 @@ resolver
resolver(document_loader, document)
(Loader, Union[Text, dict[Text, Any]]) -> Text
- Resolve a relative document identifier to an absolute one which can be fetched.
+ Resolve a relative document identifier to an absolute one that can be fetched.
The following functions can be set in RuntimeContext to override or
augment the listed behaviors.
diff --git a/appveyor.yml b/appveyor.yml
deleted file mode 100644
index 66771f0ad4..0000000000
--- a/appveyor.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-version: .{build}-{branch}
-
-cache:
- - '%LOCALAPPDATA%\pip\Cache'
-
-environment:
-
- SYSTEMROOT: "C:\\WINDOWS"
-
- matrix:
- - PYTHON: "C:\\Python36-x64"
- PYTHON_VERSION: "3.6.x"
- PYTHON_ARCH: "64"
-
- - PYTHON: "C:\\Python37-x64"
- PYTHON_VERSION: "3.7.x"
- PYTHON_ARCH: "64"
-
- - PYTHON: "C:\\Python38-x64"
- PYTHON_VERSION: "3.8.x"
- PYTHON_ARCH: "64"
-
-install:
- - ps: 'Install-Product node 0.12 x64'
- - "set PATH=%PYTHON%\\Scripts;%PATH%"
- - "%PYTHON%\\python.exe -m pip install -U pip setuptools^>=20.3 wheel"
- - "%PYTHON%\\python.exe -m pip install -U codecov -rtest-requirements.txt pytest-xdist"
- # Note the use of a `^` to escape the `>`
-
-build_script:
- - "%PYTHON%\\python.exe -m pip install -rrequirements.txt"
- - "%PYTHON%\\python.exe -m pip install -e .[deps]"
-
-test_script:
- - |
- %PYTHON%\\python.exe -m coverage run --parallel-mode -m pytest --strict -p no:cacheprovider -p no:stepwise --junit-xml=tests.xml
- - "%PYTHON%\\python.exe -m coverage combine"
- - "%PYTHON%\\python.exe -m coverage report"
- - "%PYTHON%\\python.exe -m coverage xml"
- - "%PYTHON%\\python.exe -m codecov --file coverage.xml"
-
-on_finish:
- - ps: |
- $wc = New-Object 'System.Net.WebClient'
- $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($Env:APPVEYOR_JOB_ID)", (Resolve-Path .\tests.xml))
-
-branches:
- only:
- - main
diff --git a/build-cwl-docker.sh b/build-cwl-docker.sh
deleted file mode 100755
index da4b8860ba..0000000000
--- a/build-cwl-docker.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-set -e
-docker build --file=cwltool.Dockerfile --tag=commonworkflowlanguage/cwltool_module --target module .
-docker build --file=cwltool.Dockerfile --tag=commonworkflowlanguage/cwltool .
-
-version=$(git describe --tags)
-if echo "$version" | grep -vq '\-' >& /dev/null ; then
- docker tag commonworkflowlanguage/cwltool_module commonworkflowlanguage/cwltool_module:"$version"
- docker tag commonworkflowlanguage/cwltool commonworkflowlanguage/cwltool:"$version"
-fi
diff --git a/build-cwltool-docker.sh b/build-cwltool-docker.sh
new file mode 100755
index 0000000000..61f8cc031e
--- /dev/null
+++ b/build-cwltool-docker.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -ex
+docker build --file=cwltool.Dockerfile --tag=quay.io/commonwl/cwltool_module --target module .
+docker build --file=cwltool.Dockerfile --tag=quay.io/commonwl/cwltool .
+
+docker run -it -v /var/run/docker.sock:/var/run/docker.sock \
+ -v /tmp:/tmp \
+ -v "$PWD":/tmp/cwltool \
+ quay.io/commonwl/cwltool_module /bin/sh -c \
+ "apk add gcc bash && pip install -r/tmp/cwltool/test-requirements.txt ; pytest -k 'not test_bioconda and not test_double_overwrite and not test_env_filtering' -n auto --dist=loadfile --pyargs cwltool"
+
+version=$(git describe --tags)
+if echo "$version" | grep -vq '\-' >& /dev/null ; then
+ docker tag quay.io/commonwl/cwltool_module quay.io/commonwl/cwltool_module:"$version"
+ docker tag quay.io/commonwl/cwltool quay.io/commonwl/cwltool:"$version"
+fi
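The trailing ``grep -vq '\-'`` gate only tags the images when HEAD sits exactly on a release tag; a quick illustration, assuming a git checkout:

.. code:: bash

   version=$(git describe --tags)  # e.g. "3.1.20210816" on a tag, "3.1.20210816-5-gabc1234" otherwise
   if echo "$version" | grep -vq '\-'; then echo "would tag images as $version"; fi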
diff --git a/conformance-test.sh b/conformance-test.sh
new file mode 100755
index 0000000000..e383e06f66
--- /dev/null
+++ b/conformance-test.sh
@@ -0,0 +1,178 @@
+#!/bin/bash
+venv() {
+ if ! test -d "$1" ; then
+ if command -v virtualenv > /dev/null; then
+ virtualenv -p python3 "$1"
+ else
+ python3 -m venv "$1"
+ fi
+ fi
+ # shellcheck source=/dev/null
+ source "$1"/bin/activate
+}
+
+# Set these environment variables when running the script, e.g.:
+# version=v1.1 spec_branch=new_test container=docker ./conformance-test.sh
+
+# Version of the standard to test against
+# Current options: v1.0, v1.1, v1.2
+version=${version:-v1.0}
+
+# Which branch of the standard's repo to use.
+# This can be useful when adding new features
+spec_branch=${spec_branch:-main}
+
+# Which container runtime to use
+# Valid options: docker, singularity
+container=${container:-docker}
+
+set -e
+set -x
+
+if [[ "$version" = "v1.0" ]] ; then
+ repo=common-workflow-language
+else
+ # shellcheck disable=SC2001
+ repo=cwl-$(echo "$version" | sed 's/\(v[0-9]*\.\)\([0-9]*\).*/\1\2/')
+fi
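The ``sed`` expression above collapses a full version string down to the repository suffix; for example:

.. code:: bash

   echo "v1.2.0-dev4" | sed 's/\(v[0-9]*\.\)\([0-9]*\).*/\1\2/'   # prints v1.2, so repo=cwl-v1.2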
+
+if [ ! -d "${repo}-${spec_branch}" ]; then
+ if [ ! -f "${repo}-${spec_branch}.tar.gz" ]; then
+ wget "https://github.com/common-workflow-language/${repo}/archive/${spec_branch}.tar.gz"
+ fi
+ tar xzf "${spec_branch}.tar.gz"
+fi
+
+if [ "${container}" == "docker" ]; then
+ docker pull node:slim
+fi
+
+venv cwltool-venv3
+pip3 install -U setuptools wheel pip
+pip3 uninstall -y cwltool
+pip3 install -e .
+pip3 install codecov "cwltest>=2.1"
+pushd "${repo}-${spec_branch}" || exit 1
+
+# shellcheck disable=SC2043
+if [[ "$version" = "v1.0" ]]; then
+ DRAFT="DRAFT=v1.0"
+fi
+# Clean up all cov data
+find . -name '.coverage*' -print0 | xargs -0 rm -f
+rm -f coverage.xml
+
+source=$(realpath ../cwltool)
+COVERAGE_RC=${PWD}/.coveragerc
+cat > "${COVERAGE_RC}" <<EOF
[...]
+cat > "${CWLTOOL_WITH_COV}" <<EOF
[...]
+if (( ${#exclusions[@]} > 0 )); then
+ EXCLUDE=-S$(IFS=,; echo "${exclusions[*]}")
+else
+ EXCLUDE=""
+fi
+# shellcheck disable=SC2086
+LC_ALL=C.UTF-8 ./run_test.sh --junit-xml=result3.xml ${EXCLUDE} \
+ RUNNER=${CWLTOOL_WITH_COV} "-j$(nproc)" ${BADGE} \
+ ${DRAFT} "${EXTRA}" \
+ "--classname=py3_${container}"
+# LC_ALL=C is to work around junit-xml ASCII only bug
+
+# capture return code of ./run_test.sh
+CODE=$?
+
+find . -name '.coverage.*' -print0 | xargs -0 coverage combine --rcfile="${COVERAGE_RC}" --append
+coverage xml --rcfile="${COVERAGE_RC}"
+codecov --file coverage.xml
+
+if [ -d conformance ]
+then
+ rm -rf conformance/cwltool/cwl_"${version}"/cwltool_latest
+ cp -r conformance/cwltool/cwl_"${version}"/cwltool_"${tool_ver}" conformance/cwltool/cwl_"${version}"/cwltool_latest
+ git -C conformance add --all
+ git -C conformance diff-index --quiet HEAD || git -C conformance commit -m "${CONFORMANCE_MSG}"
+ git -C conformance push http://"${jenkins_cwl_conformance}":x-oauth-basic@github.com/common-workflow-language/conformance.git
+fi
+
+popd || exit
+deactivate
+
+# build new docker container
+# if [ "$GIT_BRANCH" = "origin/main" ] && [[ "$version" = "v1.0" ]]
+# then
+# ./build-cwltool-docker.sh || true
+# fi
+#docker rm -v $(docker ps -a -f status=exited | sed 's/ */ /g' | cut -d' ' -f1)
+exit ${CODE}
diff --git a/cwltool.Dockerfile b/cwltool.Dockerfile
index 8c483ab1e3..b1478a66a2 100644
--- a/cwltool.Dockerfile
+++ b/cwltool.Dockerfile
@@ -1,22 +1,29 @@
-FROM python:3.6-alpine as builder
+FROM python:3.9-alpine as builder
RUN apk add --no-cache git gcc python3-dev libxml2-dev libxslt-dev libc-dev linux-headers
WORKDIR /cwltool
COPY . .
-RUN python setup.py bdist_wheel --dist-dir=/wheels
-RUN pip wheel -r requirements.txt --wheel-dir=/wheels
-RUN pip install --no-index --no-warn-script-location --root=/pythonroot/ /wheels/*.whl
+RUN pip install toml -rmypy_requirements.txt "$(grep ruamel requirements.txt)" \
+ "$(grep schema.salad requirements.txt)"
+# schema-salad is needed to be installed (this time as pure Python) for
+# cwltool + mypyc
+RUN CWLTOOL_USE_MYPYC=1 MYPYPATH=typeshed python setup.py bdist_wheel --dist-dir=/wheels
+RUN pip wheel -r requirements.txt galaxy-tool-util --wheel-dir=/wheels
+RUN rm /wheels/schema_salad*
+RUN SCHEMA_SALAD_USE_MYPYC=1 MYPYPATH=typeshed pip wheel --no-binary :all: $(grep schema.salad requirements.txt) --wheel-dir=/wheels
+RUN pip install --force-reinstall --no-index --no-warn-script-location --root=/pythonroot/ /wheels/*.whl
+# --force-reinstall to install our new mypyc compiled schema-salad package
-FROM python:3.6-alpine as module
-LABEL maintainer peter.amstutz@curoverse.com
+FROM python:3.9-alpine as module
+LABEL maintainer peter.amstutz@curii.com
RUN apk add --no-cache docker nodejs graphviz libxml2 libxslt
COPY --from=builder /pythonroot/ /
-FROM python:3.6-alpine
-LABEL maintainer peter.amstutz@curoverse.com
+FROM python:3.9-alpine
+LABEL maintainer peter.amstutz@curii.com
RUN apk add --no-cache docker nodejs graphviz libxml2 libxslt
COPY --from=builder /pythonroot/ /
diff --git a/cwltool/__init__.py b/cwltool/__init__.py
index 057094f459..030ec0c1fa 100644
--- a/cwltool/__init__.py
+++ b/cwltool/__init__.py
@@ -1,3 +1,12 @@
"""Reference implementation of the CWL standards."""
__author__ = "pamstutz@veritasgenetics.com"
+
+CWL_CONTENT_TYPES = [
+ "text/plain",
+ "application/json",
+ "text/vnd.yaml",
+ "text/yaml",
+ "text/x-yaml",
+ "application/x-yaml",
+]
diff --git a/cwltool/argparser.py b/cwltool/argparser.py
index 205565ad68..fef777de86 100644
--- a/cwltool/argparser.py
+++ b/cwltool/argparser.py
@@ -21,7 +21,7 @@
from .process import Process, shortname
from .resolver import ga4gh_tool_registries
from .software_requirements import SOFTWARE_REQUIREMENTS_ENABLED
-from .utils import DEFAULT_TMP_PREFIX, onWindows
+from .utils import DEFAULT_TMP_PREFIX
def arg_parser() -> argparse.ArgumentParser:
@@ -49,10 +49,10 @@ def arg_parser() -> argparse.ArgumentParser:
type=str,
action="append",
help="Preserve specific environment variable when running "
- "CommandLineTools without a software container. May be provided "
- "multiple times. The default is to preserve only the PATH.",
+ "CommandLineTools. May be provided multiple times. By default PATH is "
+ "preserved when not running in a container.",
metavar="ENVVAR",
- default=["PATH"],
+ default=[],
dest="preserve_environment",
)
envgroup.add_argument(
@@ -64,8 +64,8 @@ def arg_parser() -> argparse.ArgumentParser:
dest="preserve_entire_environment",
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ containergroup = parser.add_mutually_exclusive_group()
+ containergroup.add_argument(
"--rm-container",
action="store_true",
default=True,
@@ -73,7 +73,7 @@ def arg_parser() -> argparse.ArgumentParser:
dest="rm_container",
)
- exgroup.add_argument(
+ containergroup.add_argument(
"--leave-container",
action="store_false",
default=True,
@@ -123,8 +123,8 @@ def arg_parser() -> argparse.ArgumentParser:
default=DEFAULT_TMP_PREFIX,
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ intgroup = parser.add_mutually_exclusive_group()
+ intgroup.add_argument(
"--tmp-outdir-prefix",
type=str,
help="Path prefix for intermediate output directories. Defaults to the "
@@ -132,7 +132,7 @@ def arg_parser() -> argparse.ArgumentParser:
default="",
)
- exgroup.add_argument(
+ intgroup.add_argument(
"--cachedir",
type=str,
default="",
@@ -141,8 +141,8 @@ def arg_parser() -> argparse.ArgumentParser:
"troubleshooting of CWL documents.",
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ tmpgroup = parser.add_mutually_exclusive_group()
+ tmpgroup.add_argument(
"--rm-tmpdir",
action="store_true",
default=True,
@@ -150,7 +150,7 @@ def arg_parser() -> argparse.ArgumentParser:
dest="rm_tmpdir",
)
- exgroup.add_argument(
+ tmpgroup.add_argument(
"--leave-tmpdir",
action="store_false",
default=True,
@@ -158,8 +158,8 @@ def arg_parser() -> argparse.ArgumentParser:
dest="rm_tmpdir",
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ outgroup = parser.add_mutually_exclusive_group()
+ outgroup.add_argument(
"--move-outputs",
action="store_const",
const="move",
@@ -169,7 +169,7 @@ def arg_parser() -> argparse.ArgumentParser:
dest="move_outputs",
)
- exgroup.add_argument(
+ outgroup.add_argument(
"--leave-outputs",
action="store_const",
const="leave",
@@ -178,7 +178,7 @@ def arg_parser() -> argparse.ArgumentParser:
dest="move_outputs",
)
- exgroup.add_argument(
+ outgroup.add_argument(
"--copy-outputs",
action="store_const",
const="copy",
@@ -188,8 +188,8 @@ def arg_parser() -> argparse.ArgumentParser:
dest="move_outputs",
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ pullgroup = parser.add_mutually_exclusive_group()
+ pullgroup.add_argument(
"--enable-pull",
default=True,
action="store_true",
@@ -197,7 +197,7 @@ def arg_parser() -> argparse.ArgumentParser:
dest="pull_image",
)
- exgroup.add_argument(
+ pullgroup.add_argument(
"--disable-pull",
default=True,
action="store_false",
@@ -280,65 +280,68 @@ def arg_parser() -> argparse.ArgumentParser:
type=str,
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ printgroup = parser.add_mutually_exclusive_group()
+ printgroup.add_argument(
"--print-rdf",
action="store_true",
help="Print corresponding RDF graph for workflow and exit",
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--print-dot",
action="store_true",
help="Print workflow visualization in graphviz format and exit",
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--print-pre",
action="store_true",
help="Print CWL document after preprocessing.",
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--print-deps", action="store_true", help="Print CWL document dependencies."
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--print-input-deps",
action="store_true",
help="Print input object document dependencies.",
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--pack",
action="store_true",
help="Combine components into single document and print.",
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--version", action="store_true", help="Print version and exit"
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--validate", action="store_true", help="Validate CWL document only."
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--print-supported-versions",
action="store_true",
help="Print supported CWL specs.",
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--print-subgraph",
action="store_true",
help="Print workflow subgraph that will execute. Can combined with "
- "--target.",
+ "--target or --single-step",
)
- exgroup.add_argument(
+ printgroup.add_argument(
"--print-targets", action="store_true", help="Print targets (output parameters)"
)
+ printgroup.add_argument(
+ "--make-template", action="store_true", help="Generate a template input object"
+ )
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ strictgroup = parser.add_mutually_exclusive_group()
+ strictgroup.add_argument(
"--strict",
action="store_true",
help="Strict validation (unrecognized or out of place fields are error)",
default=True,
dest="strict",
)
- exgroup.add_argument(
+ strictgroup.add_argument(
"--non-strict",
action="store_false",
help="Lenient validation (ignore unrecognized fields)",
@@ -354,15 +357,15 @@ def arg_parser() -> argparse.ArgumentParser:
dest="skip_schemas",
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ doccachegroup = parser.add_mutually_exclusive_group()
+ doccachegroup.add_argument(
"--no-doc-cache",
action="store_false",
help="Disable disk cache for documents loaded over HTTP",
default=True,
dest="doc_cache",
)
- exgroup.add_argument(
+ doccachegroup.add_argument(
"--doc-cache",
action="store_true",
help="Enable disk cache for documents loaded over HTTP",
@@ -370,12 +373,14 @@ def arg_parser() -> argparse.ArgumentParser:
dest="doc_cache",
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument("--verbose", action="store_true", help="Default logging")
- exgroup.add_argument(
+ volumegroup = parser.add_mutually_exclusive_group()
+ volumegroup.add_argument("--verbose", action="store_true", help="Default logging")
+ volumegroup.add_argument(
"--quiet", action="store_true", help="Only print warnings and errors."
)
- exgroup.add_argument("--debug", action="store_true", help="Print even more logging")
+ volumegroup.add_argument(
+ "--debug", action="store_true", help="Print even more logging"
+ )
parser.add_argument(
"--strict-memory-limit",
@@ -506,19 +511,18 @@ def arg_parser() -> argparse.ArgumentParser:
default=False,
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ colorgroup = parser.add_mutually_exclusive_group()
+ colorgroup.add_argument(
"--enable-color",
action="store_true",
help="Enable logging color (default enabled)",
- default=not onWindows(),
+ default=True,
)
- exgroup.add_argument(
+ colorgroup.add_argument(
"--disable-color",
action="store_false",
dest="enable_color",
help="Disable colored logging (default false)",
- default=onWindows(),
)
parser.add_argument(
@@ -545,15 +549,15 @@ def arg_parser() -> argparse.ArgumentParser:
help=argparse.SUPPRESS,
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ reggroup = parser.add_mutually_exclusive_group()
+ reggroup.add_argument(
"--enable-ga4gh-tool-registry",
action="store_true",
help="Enable tool resolution using GA4GH tool registry API",
dest="enable_ga4gh_tool_registry",
default=True,
)
- exgroup.add_argument(
+ reggroup.add_argument(
"--disable-ga4gh-tool-registry",
action="store_false",
help="Disable tool resolution using GA4GH tool registry API",
@@ -579,15 +583,15 @@ def arg_parser() -> argparse.ArgumentParser:
choices=("stop", "continue"),
)
- exgroup = parser.add_mutually_exclusive_group()
- exgroup.add_argument(
+ checkgroup = parser.add_mutually_exclusive_group()
+ checkgroup.add_argument(
"--compute-checksum",
action="store_true",
default=True,
help="Compute checksum of contents while collecting outputs",
dest="compute_checksum",
)
- exgroup.add_argument(
+ checkgroup.add_argument(
"--no-compute-checksum",
action="store_false",
help="Do not compute checksum of contents while collecting outputs",
@@ -602,9 +606,6 @@ def arg_parser() -> argparse.ArgumentParser:
"spaces and hash characters.",
dest="relax_path_checks",
)
- exgroup.add_argument(
- "--make-template", action="store_true", help="Generate a template input object"
- )
parser.add_argument(
"--force-docker-pull",
@@ -628,13 +629,31 @@ def arg_parser() -> argparse.ArgumentParser:
help="Read process requirement overrides from file.",
)
- parser.add_argument(
+ subgroup = parser.add_mutually_exclusive_group()
+ subgroup.add_argument(
"--target",
"-t",
action="append",
help="Only execute steps that contribute to listed targets (can be "
"provided more than once).",
)
+ subgroup.add_argument(
+ "--single-step",
+ type=str,
+ default=None,
+ help="Only executes a single step in a workflow. The input object must "
+ "match that step's inputs. Can be combined with --print-subgraph.",
+ )
+ subgroup.add_argument(
+ "--single-process",
+ type=str,
+ default=None,
+ help="Only executes the underlying Process (CommandLineTool, "
+ "ExpressionTool, or sub-Workflow) for the given step in a workflow. "
+ "This will not include any step-level processing: scatter, when, no "
+ "processing of step-level default, or valueFrom input modifiers. "
+ "The input object must match that Process's inputs.",
+ )
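+ # Usage sketch for the two options above (workflow, step, and job-file
+ # names here are illustrative):
+ # cwltool --single-step step1 wf.cwl step1-job.yml
+ # cwltool --single-process step1 wf.cwl process-job.yml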
parser.add_argument(
"--mpi-config-file",
@@ -684,7 +703,7 @@ def __init__(
"""Fail if nargs is used."""
if nargs is not None:
raise ValueError("nargs not allowed")
- super(FSAction, self).__init__(option_strings, dest, **kwargs)
+ super().__init__(option_strings, dest, **kwargs)
def __call__(
self,
@@ -712,7 +731,7 @@ def __init__(
"""Initialize."""
if nargs is not None:
raise ValueError("nargs not allowed")
- super(FSAppendAction, self).__init__(option_strings, dest, **kwargs)
+ super().__init__(option_strings, dest, **kwargs)
def __call__(
self,
diff --git a/cwltool/builder.py b/cwltool/builder.py
index 1cdb37df94..f88ccc7591 100644
--- a/cwltool/builder.py
+++ b/cwltool/builder.py
@@ -37,10 +37,8 @@
CWLObjectType,
CWLOutputType,
aslist,
- docker_windows_path_adjust,
get_listing,
normalizeFilesDirs,
- onWindows,
visit_class,
)
@@ -48,6 +46,12 @@
from .pathmapper import PathMapper
from .provenance_profile import ProvenanceProfile # pylint: disable=unused-import
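+# Vocabulary passed to schema-salad validate() calls so that the bare
+# type names "Any", "File", and "Directory" resolve to their full IRIs.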
+INPUT_OBJ_VOCAB: Dict[str, str] = {
+ "Any": "https://w3id.org/cwl/salad#Any",
+ "File": "https://w3id.org/cwl/cwl#File",
+ "Directory": "https://w3id.org/cwl/cwl#Directory",
+}
+
def content_limit_respected_read_bytes(f): # type: (IO[bytes]) -> bytes
contents = f.read(CONTENT_LIMIT + 1)
@@ -118,7 +122,7 @@ def check_format(
continue
if "format" not in afile:
raise ValidationException(
- "File has no 'format' defined: {}".format(json_dumps(afile, indent=4))
+ f"File has no 'format' defined: {json_dumps(afile, indent=4)}"
)
for inpf in aslist(input_formats):
if afile["format"] == inpf or formatSubclassOf(
@@ -126,11 +130,13 @@ def check_format(
):
return
raise ValidationException(
- "File has an incompatible format: {}".format(json_dumps(afile, indent=4))
+ f"File has an incompatible format: {json_dumps(afile, indent=4)}"
)
-class HasReqsHints(object):
+class HasReqsHints:
+ """Base class for get_requirement()."""
+
def __init__(self) -> None:
"""Initialize this reqs decorator."""
self.requirements = [] # type: List[CWLObjectType]
@@ -223,6 +229,7 @@ def bind_input(
lead_pos: Optional[Union[int, List[int]]] = None,
tail_pos: Optional[Union[str, List[int]]] = None,
) -> List[MutableMapping[str, Union[str, List[int]]]]:
+ debug = _logger.isEnabledFor(logging.DEBUG)
if tail_pos is None:
tail_pos = []
@@ -244,8 +251,17 @@ def bind_input(
position = binding["position"]
if isinstance(position, str): # no need to test the CWL Version
# the schema for v1.0 only allow ints
- binding["position"] = self.do_eval(position, context=datum)
- bp.append(binding["position"])
+ result = self.do_eval(position, context=datum)
+ if not isinstance(result, int):
+ raise SourceLine(
+ schema["inputBinding"], "position", WorkflowException, debug
+ ).makeError(
+ "'position' expressions must evaluate to an int, "
+ f"not a {type(result)}. Expression {position} "
+ f"resulted in '{result}'."
+ )
+ binding["position"] = result
+ bp.append(result)
else:
bp.extend(aslist(binding["position"]))
else:
@@ -272,7 +288,7 @@ def bind_input(
avsc = self.names.get_name(cast(str, t["name"]), None)
if not avsc:
avsc = make_avsc_object(convert_to_dict(t), self.names)
- if validate(avsc, datum):
+ if validate(avsc, datum, vocab=INPUT_OBJ_VOCAB):
schema = copy.deepcopy(schema)
schema["type"] = t
if not value_from_expression:
@@ -294,7 +310,7 @@ def bind_input(
bound_input = True
if not bound_input:
raise ValidationException(
- "'%s' is not a valid union %s" % (datum, schema["type"])
+ "'{}' is not a valid union {}".format(datum, schema["type"])
)
elif isinstance(schema["type"], MutableMapping):
st = copy.deepcopy(schema["type"])
@@ -376,7 +392,7 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
self.files.append(f)
return f
- if schema["type"] == "File":
+ if schema["type"] == "org.w3id.cwl.cwl.File":
datum = cast(CWLObjectType, datum)
self.files.append(datum)
@@ -390,7 +406,10 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
if loadContents_sourceline and loadContents_sourceline["loadContents"]:
with SourceLine(
- loadContents_sourceline, "loadContents", WorkflowException
+ loadContents_sourceline,
+ "loadContents",
+ WorkflowException,
+ debug,
):
try:
with self.fs_access.open(
@@ -398,22 +417,49 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
) as f2:
datum["contents"] = content_limit_respected_read(f2)
except Exception as e:
- raise Exception("Reading %s\n%s" % (datum["location"], e))
+ raise Exception(
+ "Reading {}\n{}".format(datum["location"], e)
+ )
if "secondaryFiles" in schema:
if "secondaryFiles" not in datum:
datum["secondaryFiles"] = []
- for sf in aslist(schema["secondaryFiles"]):
- if "required" in sf:
- sf_required = self.do_eval(sf["required"], context=datum)
+ for num, sf_entry in enumerate(aslist(schema["secondaryFiles"])):
+ if "required" in sf_entry and sf_entry["required"] is not None:
+ required_result = self.do_eval(
+ sf_entry["required"], context=datum
+ )
+ if not (
+ isinstance(required_result, bool)
+ or required_result is None
+ ):
+ if (
+ aslist(schema["secondaryFiles"])
+ == schema["secondaryFiles"]
+ ):
+ sf_item: Any = cast(
+ List[Any], schema["secondaryFiles"]
+ )[num]
+ else:
+ sf_item = schema["secondaryFiles"]
+ raise SourceLine(
+ sf_item, "required", WorkflowException, debug
+ ).makeError(
+ "The result of a expression in the field "
+ "'required' must "
+ f"be a bool or None, not a {type(required_result)}. "
+ f"Expression '{sf_entry['required']}' resulted "
+ f"in '{required_result}'."
+ )
+ sf_required = required_result
else:
sf_required = True
- if "$(" in sf["pattern"] or "${" in sf["pattern"]:
- sfpath = self.do_eval(sf["pattern"], context=datum)
+ if "$(" in sf_entry["pattern"] or "${" in sf_entry["pattern"]:
+ sfpath = self.do_eval(sf_entry["pattern"], context=datum)
else:
sfpath = substitute(
- cast(str, datum["basename"]), sf["pattern"]
+ cast(str, datum["basename"]), sf_entry["pattern"]
)
for sfname in aslist(sfpath):
@@ -435,9 +481,13 @@ def _capture_files(f: CWLObjectType) -> CWLObjectType:
sf_location = sfname["location"]
sfbasename = sfname["basename"]
else:
- raise WorkflowException(
- "Expected secondaryFile expression to return type 'str' or 'MutableMapping', received '%s'"
- % (type(sfname))
+ raise SourceLine(
+ sf_entry, "pattern", WorkflowException, debug
+ ).makeError(
+ "Expected secondaryFile expression to "
+ "return type 'str', a 'File' or 'Directory' "
+ "dictionary, or a list of the same. Received "
+ f"'{type(sfname)} from '{sf_entry['pattern']}'."
)
for d in cast(
@@ -486,7 +536,12 @@ def addsf(
},
)
elif sf_required:
- raise WorkflowException(
+ raise SourceLine(
+ schema,
+ "secondaryFiles",
+ WorkflowException,
+ debug,
+ ).makeError(
"Missing required secondary file '%s' from file object: %s"
% (sfname, json_dumps(datum, indent=4))
)
@@ -496,10 +551,38 @@ def addsf(
)
if "format" in schema:
+ eval_format: Any = self.do_eval(schema["format"])
+ if isinstance(eval_format, str):
+ evaluated_format: Union[str, List[str]] = eval_format
+ elif isinstance(eval_format, MutableSequence):
+ for entry in eval_format:
+ if not isinstance(entry, str):
+ raise SourceLine(
+ schema, "format", WorkflowException, debug
+ ).makeError(
+ "An expression in the 'format' field must "
+ "evaluate to a string, or list of strings. "
+ "However a non-string item was received: "
+ f"'{entry}' of type '{type(entry)}'. "
+ f"The expression was '{schema['format']}' and "
+ f"its fully evaluated result is '{eval_format}'."
+ )
+ evaluated_format = cast(List[str], eval_format)
+ else:
+ raise SourceLine(
+ schema, "format", WorkflowException, debug
+ ).makeError(
+ "An expression in the 'format' field must "
+ "evaluate to a string, or list of strings. "
+ "However the type of the expression result was "
+ f"{type(eval_format)}. "
+ f"The expression was '{schema['format']}' and "
+ f"its fully evaluated result is 'eval_format'."
+ )
try:
check_format(
datum,
- cast(Union[List[str], str], self.do_eval(schema["format"])),
+ evaluated_format,
self.formatgraph,
)
except ValidationException as ve:
@@ -514,7 +597,7 @@ def addsf(
_capture_files,
)
- if schema["type"] == "Directory":
+ if schema["type"] == "org.w3id.cwl.cwl.Directory":
datum = cast(CWLObjectType, datum)
ll = schema.get("loadListing") or self.loadListing
if ll and ll != "no_listing":
@@ -545,28 +628,21 @@ def tostr(self, value: Union[MutableMapping[str, str], Any]) -> str:
):
if "path" not in value:
raise WorkflowException(
- u'%s object missing "path": %s' % (value["class"], value)
+ '{} object missing "path": {}'.format(value["class"], value)
)
-
- # Path adjust for windows file path when passing to docker, docker accepts unix like path only
- (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
- if onWindows() and docker_req is not None:
- # docker_req is none only when there is no dockerRequirement
- # mentioned in hints and Requirement
- path = docker_windows_path_adjust(value["path"])
- return path
return value["path"]
else:
return str(value)
def generate_arg(self, binding: CWLObjectType) -> List[str]:
value = binding.get("datum")
+ debug = _logger.isEnabledFor(logging.DEBUG)
if "valueFrom" in binding:
with SourceLine(
binding,
"valueFrom",
WorkflowException,
- _logger.isEnabledFor(logging.DEBUG),
+ debug,
):
value = self.do_eval(cast(str, binding["valueFrom"]), context=value)
@@ -577,7 +653,7 @@ def generate_arg(self, binding: CWLObjectType) -> List[str]:
binding,
"separate",
WorkflowException,
- _logger.isEnabledFor(logging.DEBUG),
+ debug,
):
raise WorkflowException(
"'separate' option can not be specified without prefix"
diff --git a/cwltool/checker.py b/cwltool/checker.py
index 0f9d39444a..4ccca2fefd 100644
--- a/cwltool/checker.py
+++ b/cwltool/checker.py
@@ -60,7 +60,7 @@ def check_types(
return check_types(
merge_flatten_type(_get_type(srctype)), _get_type(sinktype), None, None
)
- raise WorkflowException("Unrecognized linkMerge enum '{}'".format(linkMerge))
+ raise WorkflowException(f"Unrecognized linkMerge enum '{linkMerge}'")
def merge_flatten_type(src: SinkType) -> CWLOutputType:
@@ -211,18 +211,16 @@ def static_checker(
sink = warning.sink
linkMerge = warning.linkMerge
sinksf = sorted(
- [
- p["pattern"]
- for p in sink.get("secondaryFiles", [])
- if p.get("required", True)
- ]
+ p["pattern"]
+ for p in sink.get("secondaryFiles", [])
+ if p.get("required", True)
)
- srcsf = sorted([p["pattern"] for p in src.get("secondaryFiles", [])])
+ srcsf = sorted(p["pattern"] for p in src.get("secondaryFiles", []))
# Every secondaryFile required by the sink, should be declared
# by the source
missing = missing_subset(srcsf, sinksf)
if missing:
- msg1 = "Parameter '%s' requires secondaryFiles %s but" % (
+ msg1 = "Parameter '{}' requires secondaryFiles {} but".format(
shortname(sink["id"]),
missing,
)
@@ -241,7 +239,7 @@ def static_checker(
% shortname(sink["id"])
)
msg = SourceLine(sink).makeError(
- "%s\n%s" % (msg1, bullets([msg3, msg4, msg5], " "))
+ "{}\n{}".format(msg1, bullets([msg3, msg4, msg5], " "))
)
elif sink.get("not_connected"):
if not sink.get("used_by_step"):
diff --git a/cwltool/command_line_tool.py b/cwltool/command_line_tool.py
index 71d1e9866b..99f8b42189 100644
--- a/cwltool/command_line_tool.py
+++ b/cwltool/command_line_tool.py
@@ -10,6 +10,8 @@
import shutil
import threading
import urllib
+import urllib.parse
+from enum import Enum
from functools import cmp_to_key, partial
from typing import (
Any,
@@ -21,6 +23,7 @@
MutableMapping,
MutableSequence,
Optional,
+ Pattern,
Set,
TextIO,
Union,
@@ -37,7 +40,12 @@
from schema_salad.validate import validate_ex
from typing_extensions import TYPE_CHECKING, Type
-from .builder import Builder, content_limit_respected_read_bytes, substitute
+from .builder import (
+ INPUT_OBJ_VOCAB,
+ Builder,
+ content_limit_respected_read_bytes,
+ substitute,
+)
from .context import LoadingContext, RuntimeContext, getdefault
from .docker import DockerCommandLineJob
from .errors import UnsupportedRequirement, WorkflowException
@@ -57,7 +65,7 @@
from .singularity import SingularityCommandLineJob
from .stdfsaccess import StdFsAccess
from .udocker import UDockerCommandLineJob
-from .update import ORDERED_VERSIONS
+from .update import ORDERED_VERSIONS, ORIGINAL_CWLVERSION
from .utils import (
CWLObjectType,
CWLOutputType,
@@ -67,41 +75,32 @@
adjustDirObjs,
adjustFileObjs,
aslist,
- convert_pathsep_to_unix,
- docker_windows_path_adjust,
get_listing,
normalizeFilesDirs,
- onWindows,
random_outdir,
shared_file_lock,
trim_listing,
upgrade_lock,
visit_class,
- windows_default_container_id,
)
if TYPE_CHECKING:
from .provenance_profile import ProvenanceProfile # pylint: disable=unused-import
-ACCEPTLIST_EN_STRICT_RE = re.compile(r"^[a-zA-Z0-9._+-]+$")
-ACCEPTLIST_EN_RELAXED_RE = re.compile(r".*") # Accept anything
-ACCEPTLIST_RE = ACCEPTLIST_EN_STRICT_RE
-DEFAULT_CONTAINER_MSG = """
-We are on Microsoft Windows and not all components of this CWL description have a
-container specified. This means that these steps will be executed in the default container,
-which is %s.
-Note, this could affect portability if this CWL description relies on non-POSIX features
-or commands in this container. For best results add the following to your CWL
-description's hints section:
+class PathCheckingMode(Enum):
+ """What characters are allowed in path names.
+
+ We have the strict (default) mode and the relaxed mode.
+ """
-hints:
- DockerRequirement:
- dockerPull: %s
-"""
+ STRICT = re.compile(
+ r"^[\w.+\-\u2600-\u26FF\U0001f600-\U0001f64f]+$"
+ ) # accept unicode word characters and emojis
+ RELAXED = re.compile(r".*") # Accept anything
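+ # A sketch of the difference (\w is Unicode-aware in Python 3):
+ # PathCheckingMode.STRICT.value.match("café_1.txt") -> matches
+ # PathCheckingMode.STRICT.value.match("a b.txt") -> None (space rejected)
+ # PathCheckingMode.RELAXED.value.match("a b.txt") -> matches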
-class ExpressionJob(object):
+class ExpressionJob:
"""Job for ExpressionTools."""
def __init__(
@@ -211,7 +210,7 @@ def revmap_file(
if "location" in f and "path" not in f:
location = cast(str, f["location"])
if location.startswith("file://"):
- f["path"] = convert_pathsep_to_unix(uri_file_path(location))
+ f["path"] = uri_file_path(location)
else:
return f
@@ -264,7 +263,9 @@ def revmap_file(
)
-class CallbackJob(object):
+class CallbackJob:
+ """Callback Job class, used by CommandLine.job()."""
+
def __init__(
self,
job: "CommandLineTool",
@@ -296,7 +297,9 @@ def run(
)
-def check_adjust(builder: Builder, file_o: CWLObjectType) -> CWLObjectType:
+def check_adjust(
+ accept_re: Pattern[str], builder: Builder, file_o: CWLObjectType
+) -> CWLObjectType:
"""
Map files to assigned path inside a container.
@@ -307,9 +310,7 @@ def check_adjust(builder: Builder, file_o: CWLObjectType) -> CWLObjectType:
raise ValueError(
"Do not call check_adjust using a builder that doesn't have a pathmapper."
)
- file_o["path"] = path = docker_windows_path_adjust(
- builder.pathmapper.mapper(cast(str, file_o["location"]))[1]
- )
+ file_o["path"] = path = builder.pathmapper.mapper(cast(str, file_o["location"]))[1]
basename = cast(str, file_o.get("basename"))
dn, bn = os.path.split(path)
if file_o.get("dirname") != dn:
@@ -322,11 +323,9 @@ def check_adjust(builder: Builder, file_o: CWLObjectType) -> CWLObjectType:
file_o["nameroot"] = str(nr)
if file_o.get("nameext") != ne:
file_o["nameext"] = str(ne)
- if not ACCEPTLIST_RE.match(basename):
+ if not accept_re.match(basename):
raise WorkflowException(
- "Invalid filename: '{}' contains illegal characters".format(
- file_o["basename"]
- )
+ f"Invalid filename: '{file_o['basename']}' contains illegal characters"
)
return file_o
@@ -349,8 +348,8 @@ def check_valid_locations(fs_access: StdFsAccess, ob: CWLObjectType) -> None:
class ParameterOutputWorkflowException(WorkflowException):
def __init__(self, msg: str, port: CWLObjectType, **kwargs: Any) -> None:
"""Exception for when there was an error collecting output for a parameter."""
- super(ParameterOutputWorkflowException, self).__init__(
- "Error collecting output for parameter '%s':\n%s"
+ super().__init__(
+ "Error collecting output for parameter '%s': %s"
% (shortname(cast(str, port["id"])), msg),
kwargs,
)
@@ -361,8 +360,13 @@ def __init__(
self, toolpath_object: CommentedMap, loadingContext: LoadingContext
) -> None:
"""Initialize this CommandLineTool."""
- super(CommandLineTool, self).__init__(toolpath_object, loadingContext)
+ super().__init__(toolpath_object, loadingContext)
self.prov_obj = loadingContext.prov_obj
+ self.path_check_mode = (
+ PathCheckingMode.RELAXED
+ if loadingContext.relax_path_checks
+ else PathCheckingMode.STRICT
+ ) # type: PathCheckingMode
def make_job_runner(self, runtimeContext: RuntimeContext) -> Type[JobBase]:
dockerReq, dockerRequired = self.get_requirement("DockerRequirement")
@@ -383,17 +387,6 @@ def make_job_runner(self, runtimeContext: RuntimeContext) -> Type[JobBase]:
self.requirements.insert(0, dockerReq)
dockerRequired = True
- if (
- default_container == windows_default_container_id
- and runtimeContext.use_container
- and onWindows()
- ):
- _logger.warning(
- DEFAULT_CONTAINER_MSG,
- windows_default_container_id,
- windows_default_container_id,
- )
-
if dockerReq is not None and runtimeContext.use_container:
if mpiReq is not None:
_logger.warning("MPIRequirement with containers is a beta feature")
@@ -473,13 +466,87 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
initialWorkdir, _ = self.get_requirement("InitialWorkDirRequirement")
if initialWorkdir is None:
return
+ debug = _logger.isEnabledFor(logging.DEBUG)
+ cwl_version = cast(Optional[str], self.metadata.get(ORIGINAL_CWLVERSION, None))
+ classic_dirent = cwl_version and ORDERED_VERSIONS.index(
+ cwl_version
+ ) < ORDERED_VERSIONS.index("v1.2.0-dev2")
+ classic_listing = cwl_version and ORDERED_VERSIONS.index(
+ cwl_version
+ ) < ORDERED_VERSIONS.index("v1.1.0-dev1")
ls = [] # type: List[CWLObjectType]
if isinstance(initialWorkdir["listing"], str):
# "listing" is just a string (must be an expression) so
# just evaluate it and use the result as if it was in
# listing
- ls = cast(List[CWLObjectType], builder.do_eval(initialWorkdir["listing"]))
+ ls_evaluated = builder.do_eval(initialWorkdir["listing"])
+ fail: Any = False
+ fail_suffix: str = ""
+ if not isinstance(ls_evaluated, MutableSequence):
+ fail = ls_evaluated
+ else:
+ for entry in ls_evaluated:
+ if isinstance(entry, MutableSequence):
+ if classic_listing:
+ raise SourceLine(
+ initialWorkdir, "listing", WorkflowException, debug
+ ).makeError(
+ "InitialWorkDirRequirement.listing expressions "
+ "cannot return arrays of Files or Directories "
+ "before CWL v1.1. Please "
+ "considering using 'cwl-upgrader' to upgrade "
+ "your document to CWL v1.1' or later."
+ )
+ else:
+ for entry2 in entry:
+ if not (
+ isinstance(entry2, MutableMapping)
+ and "class" in entry2
+ and entry2["class"] in ("File", "Directory")
+ ):
+ fail = (
+ f"an array with an item ('{entry2}') that is "
+ "not a File nor a Directory object."
+ )
+ elif not (
+ isinstance(entry, MutableMapping)
+ and (
+ "class" in entry
+ and entry["class"] in ("File", "Directory")
+ or "entry" in entry
+ )
+ ):
+ fail = entry
+ elif entry is None:
+ if classic_dirent:
+ fail = entry
+ fail_suffix = (
+ " Dirent.entry cannot return 'null' before CWL "
+ "v1.2. Please consider using 'cwl-upgrader' to "
+ "upgrade your document to CWL version v1.2."
+ )
+ if fail is not False:
+ message = (
+ "Expression in a 'InitialWorkdirRequirement.listing' field "
+ "must return a list containing zero or more of: File or "
+ "Directory objects; Dirent objects"
+ )
+ if classic_dirent:
+ message += ". "
+ else:
+ message += "; null; or arrays of File or Directory objects. "
+ message += f"Got '{fail}' among the results from "
+ message += f"'{initialWorkdir['listing'].strip()}'." + fail_suffix
+ raise SourceLine(
+ initialWorkdir, "listing", WorkflowException, debug
+ ).makeError(message)
+ ls = cast(List[CWLObjectType], ls_evaluated)
else:
# "listing" is an array of either expressions or Dirent so
# evaluate each item
@@ -489,16 +556,26 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
):
if isinstance(t, Mapping) and "entry" in t:
# Dirent
- entry = builder.do_eval(
- cast(str, t["entry"]), strip_whitespace=False
- )
+ entry_field = cast(str, t["entry"])
+ # the schema guarantees that 'entry' is a string, so the cast is safe
+ entry = builder.do_eval(entry_field, strip_whitespace=False)
if entry is None:
continue
if isinstance(entry, MutableSequence):
+ if classic_listing:
+ raise SourceLine(
+ t, "entry", WorkflowException, debug
+ ).makeError(
+ "'entry' expressions are not allowed to evaluate "
+ "to an array of Files or Directories until CWL "
+ "v1.2. Consider using 'cwl-upgrader' to upgrade "
+ "your document to CWL version 1.2."
+ )
# Nested list. If it is a list of File or
# Directory objects, add it to the
- # file list, otherwise JSON serialize it.
+ # file list, otherwise JSON serialize it if CWL v1.2.
+
filelist = True
for e in entry:
if not isinstance(e, MutableMapping) or e.get(
@@ -510,7 +587,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
if filelist:
if "entryname" in t:
raise SourceLine(
- t, "entryname", WorkflowException
+ t, "entryname", WorkflowException, debug
).makeError(
"'entryname' is invalid when 'entry' returns list of File or Directory"
)
@@ -527,19 +604,39 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
):
et["entry"] = entry
else:
- et["entry"] = (
- entry
- if isinstance(entry, str)
- else json_dumps(entry, sort_keys=True)
- )
+ if isinstance(entry, str):
+ et["entry"] = entry
+ else:
+ if classic_dirent:
+ raise SourceLine(
+ t, "entry", WorkflowException, debug
+ ).makeError(
+ "'entry' expression resulted in "
+ "something other than number, object or "
+ "array besides a single File or Dirent object. "
+ "In CWL v1.2+ this would be serialized to a JSON object. "
+ "However this is a {cwl_version} document. "
+ "If that is the desired result then please "
+ "consider using 'cwl-upgrader' to upgrade "
+ "your document to CWL version 1.2. "
+ f"Result of '{entry_field}' was '{entry}'."
+ )
+ et["entry"] = json_dumps(entry, sort_keys=True)
if "entryname" in t:
- en = builder.do_eval(cast(str, t["entryname"]))
- if not isinstance(en, str):
- raise SourceLine(
- t, "entryname", WorkflowException
- ).makeError("'entryname' must be a string")
- et["entryname"] = en
+ entryname_field = cast(str, t["entryname"])
+ if "${" in entryname_field or "$(" in entryname_field:
+ en = builder.do_eval(cast(str, t["entryname"]))
+ if not isinstance(en, str):
+ raise SourceLine(
+ t, "entryname", WorkflowException, debug
+ ).makeError(
+ "'entryname' expression must result a string. "
+ f"Got '{en}' from '{entryname_field}'"
+ )
+ et["entryname"] = en
+ else:
+ et["entryname"] = entryname_field
else:
et["entryname"] = None
et["writable"] = t.get("writable", False)
@@ -558,7 +655,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
for i, t2 in enumerate(ls):
if not isinstance(t2, Mapping):
raise SourceLine(
- initialWorkdir, "listing", WorkflowException
+ initialWorkdir, "listing", WorkflowException, debug
).makeError(
"Entry at index %s of listing is not a record, was %s"
% (i, type(t2))
@@ -571,7 +668,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
if isinstance(t2["entry"], str):
if not t2["entryname"]:
raise SourceLine(
- initialWorkdir, "listing", WorkflowException
+ initialWorkdir, "listing", WorkflowException, debug
).makeError("Entry at index %s of listing missing entryname" % (i))
ls[i] = {
"class": "File",
@@ -583,7 +680,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
if not isinstance(t2["entry"], Mapping):
raise SourceLine(
- initialWorkdir, "listing", WorkflowException
+ initialWorkdir, "listing", WorkflowException, debug
).makeError(
"Entry at index %s of listing is not a record, was %s"
% (i, type(t2["entry"]))
@@ -591,7 +688,7 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
if t2["entry"].get("class") not in ("File", "Directory"):
raise SourceLine(
- initialWorkdir, "listing", WorkflowException
+ initialWorkdir, "listing", WorkflowException, debug
).makeError(
"Entry at index %s of listing is not a File or Directory object, was %s"
% (i, t2)
@@ -610,10 +707,10 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
if t3.get("class") not in ("File", "Directory"):
# Check that every item is a File or Directory object now
raise SourceLine(
- initialWorkdir, "listing", WorkflowException
+ initialWorkdir, "listing", WorkflowException, debug
).makeError(
- "Entry at index %s of listing is not a Dirent, File or Directory object, was %s"
- % (i, t2)
+ f"Entry at index {i} of listing is not a Dirent, File or "
+ f"Directory object, was {t2}."
)
if "basename" not in t3:
continue
@@ -621,36 +718,35 @@ def _initialworkdir(self, j: JobBase, builder: Builder) -> None:
t3["basename"] = basename
if basename.startswith("../"):
raise SourceLine(
- initialWorkdir, "listing", WorkflowException
+ initialWorkdir, "listing", WorkflowException, debug
).makeError(
- "Name '%s' at index %s of listing is invalid, cannot start with '../'"
- % (basename, i)
+ f"Name '{basename}' at index {i} of listing is invalid, "
+ "cannot start with '../'"
)
if basename.startswith("/"):
# only if DockerRequirement in requirements
- cwl_version = self.metadata.get(
- "http://commonwl.org/cwltool#original_cwlVersion", None
- )
- if isinstance(cwl_version, str) and ORDERED_VERSIONS.index(
+ if cwl_version and ORDERED_VERSIONS.index(
cwl_version
) < ORDERED_VERSIONS.index("v1.2.0-dev4"):
raise SourceLine(
- initialWorkdir, "listing", WorkflowException
+ initialWorkdir, "listing", WorkflowException, debug
).makeError(
- "Name '%s' at index %s of listing is invalid, paths starting with '/' only permitted in CWL 1.2 and later"
- % (basename, i)
+ f"Name '{basename}' at index {i} of listing is invalid, "
+ "paths starting with '/' only permitted in CWL 1.2 "
+ "and later."
)
req, is_req = self.get_requirement("DockerRequirement")
if is_req is not True:
raise SourceLine(
- initialWorkdir, "listing", WorkflowException
+ initialWorkdir, "listing", WorkflowException, debug
).makeError(
- "Name '%s' at index %s of listing is invalid, name can only start with '/' when DockerRequirement is in 'requirements'"
- % (basename, i)
+ f"Name '{basename}' at index {i} of listing is invalid, "
+ "name can only start with '/' when DockerRequirement "
+ "is in 'requirements'."
)
- with SourceLine(initialWorkdir, "listing", WorkflowException):
+ with SourceLine(initialWorkdir, "listing", WorkflowException, debug):
j.generatefiles["listing"] = ls
for entry in ls:
if "basename" in entry:
@@ -680,7 +776,7 @@ def remove_dirname(d: CWLObjectType) -> None:
visit_class(
[builder.files, builder.bindings],
("File", "Directory"),
- partial(check_adjust, builder),
+ partial(check_adjust, self.path_check_mode.value, builder),
)
def job(
@@ -708,7 +804,9 @@ def job(
cachebuilder.stagedir,
separateDirs=False,
)
- _check_adjust = partial(check_adjust, cachebuilder)
+ _check_adjust = partial(
+ check_adjust, self.path_check_mode.value, cachebuilder
+ )
visit_class(
[cachebuilder.files, cachebuilder.bindings],
("File", "Directory"),
@@ -789,7 +887,7 @@ def calc_checksum(location: str) -> Optional[str]:
jobcache = os.path.join(runtimeContext.cachedir, cachekey)
# Create a lockfile to manage cache status.
- jobcachepending = "{}.status".format(jobcache)
+ jobcachepending = f"{jobcache}.status"
jobcachelock = None
jobstatus = None
@@ -882,7 +980,7 @@ def update_status_output_callback(
)
builder.requirements = j.requirements
- _check_adjust = partial(check_adjust, builder)
+ _check_adjust = partial(check_adjust, self.path_check_mode.value, builder)
visit_class(
[builder.files, builder.bindings], ("File", "Directory"), _check_adjust
@@ -905,13 +1003,25 @@ def update_status_output_callback(
if self.tool.get("stdin"):
with SourceLine(self.tool, "stdin", ValidationException, debug):
- j.stdin = cast(str, builder.do_eval(self.tool["stdin"]))
+ stdin_eval = builder.do_eval(self.tool["stdin"])
+ if not (isinstance(stdin_eval, str) or stdin_eval is None):
+ raise ValidationException(
+ f"'stdin' expression must return a string or null. Got '{stdin_eval}' "
+ f"for '{self.tool['stdin']}'."
+ )
+ j.stdin = stdin_eval
if j.stdin:
reffiles.append({"class": "File", "path": j.stdin})
if self.tool.get("stderr"):
with SourceLine(self.tool, "stderr", ValidationException, debug):
- j.stderr = cast(str, builder.do_eval(self.tool["stderr"]))
+ stderr_eval = builder.do_eval(self.tool["stderr"])
+ if not isinstance(stderr_eval, str):
+ raise ValidationException(
+ f"'stderr' expression must return a string. Got '{stderr_eval}' "
+ f"for '{self.tool['stderr']}'."
+ )
+ j.stderr = stderr_eval
if j.stderr:
if os.path.isabs(j.stderr) or ".." in j.stderr:
raise ValidationException(
@@ -920,7 +1030,13 @@ def update_status_output_callback(
if self.tool.get("stdout"):
with SourceLine(self.tool, "stdout", ValidationException, debug):
- j.stdout = cast(str, builder.do_eval(self.tool["stdout"]))
+ stdout_eval = builder.do_eval(self.tool["stdout"])
+ if not isinstance(stdout_eval, str):
+ raise ValidationException(
+ f"'stdout' expression must return a string. Got '{stdout_eval}' "
+ f"for '{self.tool['stdout']}'."
+ )
+ j.stdout = stdout_eval
if j.stdout:
if os.path.isabs(j.stdout) or ".." in j.stdout or not j.stdout:
raise ValidationException(
@@ -980,36 +1096,62 @@ def register_reader(f: CWLObjectType) -> None:
timelimit, _ = self.get_requirement("ToolTimeLimit")
if timelimit is not None:
with SourceLine(timelimit, "timelimit", ValidationException, debug):
- j.timelimit = cast(
- Optional[int],
- builder.do_eval(cast(Union[int, str], timelimit["timelimit"])),
- )
- if not isinstance(j.timelimit, int) or j.timelimit < 0:
+ limit_field = cast(Dict[str, Union[str, int]], timelimit)["timelimit"]
+ if isinstance(limit_field, str):
+ timelimit_eval = builder.do_eval(limit_field)
+ if timelimit_eval and not isinstance(timelimit_eval, int):
+ raise WorkflowException(
+ "'timelimit' expression must evaluate to a long/int. Got "
+ f"'{timelimit_eval}' for expression '{limit_field}'."
+ )
+ else:
+ timelimit_eval = limit_field
+ if not isinstance(timelimit_eval, int) or timelimit_eval < 0:
raise WorkflowException(
- "timelimit must be an integer >= 0, got: %s" % j.timelimit
+ f"timelimit must be an integer >= 0, got: {timelimit_eval}"
)
+ j.timelimit = timelimit_eval
networkaccess, _ = self.get_requirement("NetworkAccess")
if networkaccess is not None:
with SourceLine(networkaccess, "networkAccess", ValidationException, debug):
- j.networkaccess = cast(
- bool,
- builder.do_eval(
- cast(Union[bool, str], networkaccess["networkAccess"])
- ),
- )
- if not isinstance(j.networkaccess, bool):
+ networkaccess_field = networkaccess["networkAccess"]
+ if isinstance(networkaccess_field, str):
+ networkaccess_eval = builder.do_eval(networkaccess_field)
+ if not isinstance(networkaccess_eval, bool):
+ raise WorkflowException(
+ "'networkAccess' expression must evaluate to a bool. "
+ f"Got '{networkaccess_eval}' for expression '{networkaccess_field}'."
+ )
+ else:
+ networkaccess_eval = networkaccess_field
+ if not isinstance(networkaccess_eval, bool):
raise WorkflowException(
- "networkAccess must be a boolean, got: %s" % j.networkaccess
+ "networkAccess must be a boolean, got: {networkaccess_eval}."
)
+ j.networkaccess = networkaccess_eval
- j.environment = {}
+ # Build a mapping to hold any EnvVarRequirement
+ required_env = {}
evr, _ = self.get_requirement("EnvVarRequirement")
if evr is not None:
- for t3 in cast(List[Dict[str, str]], evr["envDef"]):
- j.environment[t3["envName"]] = cast(
- str, builder.do_eval(t3["envValue"])
- )
+ for eindex, t3 in enumerate(cast(List[Dict[str, str]], evr["envDef"])):
+ env_value_field = t3["envValue"]
+ if "${" in env_value_field or "$(" in env_value_field:
+ env_value_eval = builder.do_eval(env_value_field)
+ if not isinstance(env_value_eval, str):
+ raise SourceLine(
+ evr["envDef"], eindex, WorkflowException, debug
+ ).makeError(
+ "'envValue expression must evaluate to a str. "
+ f"Got '{env_value_eval}' for expression '{env_value_field}'."
+ )
+ env_value = env_value_eval
+ else:
+ env_value = env_value_field
+ required_env[t3["envName"]] = env_value
+ # Construct the env
+ j.prepare_environment(runtimeContext, required_env)
shellcmd, _ = self.get_requirement("ShellCommandRequirement")
if shellcmd is not None:
@@ -1042,14 +1184,15 @@ def register_reader(f: CWLObjectType) -> None:
mpi.get("processes", runtimeContext.mpi_config.default_nproc),
)
if isinstance(np, str):
- tmp = builder.do_eval(np)
- if not isinstance(tmp, int):
- raise TypeError(
- "{} needs 'processes' to evaluate to an int, got {}".format(
- MPIRequirementName, type(np)
- )
+ np_eval = builder.do_eval(np)
+ if not isinstance(np_eval, int):
+ raise SourceLine(
+ mpi, "processes", WorkflowException, debug
+ ).makeError(
+ f"{MPIRequirementName} needs 'processes' expression to "
+ f"evaluate to an int, got '{np_eval}' for expression '{np}'."
)
- np = tmp
+ np = np_eval
j.mpi_procs = np
yield j
@@ -1065,9 +1208,7 @@ def collect_output_ports(
) -> OutputPortsType:
ret = {} # type: OutputPortsType
debug = _logger.isEnabledFor(logging.DEBUG)
- cwl_version = self.metadata.get(
- "http://commonwl.org/cwltool#original_cwlVersion", None
- )
+ cwl_version = self.metadata.get(ORIGINAL_CWLVERSION, None)
if cwl_version != "v1.0":
builder.resources["exitCode"] = rcode
try:
@@ -1117,7 +1258,11 @@ def collect_output_ports(
Schema, self.names.get_name("outputs_record_schema", None)
)
validate_ex(
- expected_schema, ret, strict=False, logger=_logger_validation_warnings
+ expected_schema,
+ ret,
+ strict=False,
+ logger=_logger_validation_warnings,
+ vocab=INPUT_OBJ_VOCAB,
)
if ret is not None and builder.mutation_manager is not None:
adjustFileObjs(ret, builder.mutation_manager.set_generation)
@@ -1160,6 +1305,20 @@ def collect_output(
for gb in aslist(binding["glob"]):
gb = builder.do_eval(gb)
if gb:
+ gb_eval_fail = False
+ if not isinstance(gb, str):
+ if isinstance(gb, list):
+ for entry in gb:
+ if not isinstance(entry, str):
+ gb_eval_fail = True
+ else:
+ gb_eval_fail = True
+ if gb_eval_fail:
+ raise WorkflowException(
+ "Resolved glob patterns must be strings "
+ f"or list of strings, not "
+ f"'{gb}' from '{binding['glob']}'"
+ )
globpatterns.extend(aslist(gb))
for gb in globpatterns:
@@ -1178,7 +1337,10 @@ def collect_output(
{
"location": g,
"path": fs_access.join(
- builder.outdir, g[len(prefix[0]) + 1 :]
+ builder.outdir,
+ urllib.parse.unquote(
+ g[len(prefix[0]) + 1 :]
+ ),
),
"basename": os.path.basename(g),
"nameroot": os.path.splitext(
@@ -1256,22 +1418,20 @@ def collect_output(
result = cast(CWLOutputType, r)
if single:
- if not result and not optional:
- with SourceLine(binding, "glob", WorkflowException, debug):
- raise WorkflowException(
- "Did not find output file with glob pattern: '{}'".format(
- globpatterns
- )
- )
- elif not result and optional:
- pass
- elif isinstance(result, MutableSequence):
- if len(result) > 1:
+ with SourceLine(binding, "glob", WorkflowException, debug):
+ if not result and not optional:
raise WorkflowException(
- "Multiple matches for output item that is a single file."
+ f"Did not find output file with glob pattern: '{globpatterns}'."
)
- else:
- result = cast(CWLOutputType, result[0])
+ elif not result and optional:
+ pass
+ elif isinstance(result, MutableSequence):
+ if len(result) > 1:
+ raise WorkflowException(
+ "Multiple matches for output item that is a single file."
+ )
+ else:
+ result = cast(CWLOutputType, result[0])
if "secondaryFiles" in schema:
with SourceLine(schema, "secondaryFiles", WorkflowException, debug):
@@ -1283,9 +1443,27 @@ def collect_output(
]
for sf in aslist(schema["secondaryFiles"]):
if "required" in sf:
- sf_required = builder.do_eval(
- sf["required"], context=primary
- )
+ with SourceLine(
+ schema["secondaryFiles"],
+ "required",
+ WorkflowException,
+ debug,
+ ):
+ sf_required_eval = builder.do_eval(
+ sf["required"], context=primary
+ )
+ if not (
+ isinstance(sf_required_eval, bool)
+ or sf_required_eval is None
+ ):
+ raise WorkflowException(
+ "Expressions in the field "
+ "'required' must evaluate to a "
+ "Boolean (true or false) or None. "
+ f"Got '{sf_required_eval}' for "
+ f"'{sf['required']}'."
+ )
+ sf_required: bool = sf_required_eval or False
else:
sf_required = False
@@ -1321,11 +1499,24 @@ def collect_output(
primary["secondaryFiles"].append(sfitem)
if "format" in schema:
- for primary in aslist(result):
- primary["format"] = builder.do_eval(
- schema["format"], context=primary
- )
-
+ format_field = cast(str, schema["format"])
+ if "$(" in format_field or "${" in format_field:
+ for index, primary in enumerate(aslist(result)):
+ format_eval = builder.do_eval(format_field, context=primary)
+ if not isinstance(format_eval, str):
+ message = (
+ f"'format' expression must evaluate to a string. "
+ f"Got '{format_eval}' from '{format_field}'."
+ )
+ if isinstance(result, list):
+ message += f" 'self' had the value of the index {index} result: '{primary}'."
+ raise SourceLine(
+ schema, "format", WorkflowException, debug
+ ).makeError(message)
+ primary["format"] = format_eval
+ else:
+ for primary in aslist(result):
+ primary["format"] = format_field
# Ensure files point to local references outside of the run environment
adjustFileObjs(result, revmap)
diff --git a/cwltool/context.py b/cwltool/context.py
index 898554f7b9..daf6aa8676 100644
--- a/cwltool/context.py
+++ b/cwltool/context.py
@@ -8,7 +8,8 @@
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from ruamel.yaml.comments import CommentedMap
from schema_salad.avro.schema import Names
-from schema_salad.ref_resolver import FetcherCallableType, Loader
+from schema_salad.ref_resolver import Loader
+from schema_salad.utils import FetcherCallableType
from typing_extensions import TYPE_CHECKING
from .builder import Builder, HasReqsHints
@@ -26,7 +27,9 @@
from .provenance_profile import ProvenanceProfile
-class ContextBase(object):
+class ContextBase:
+ """Shared kwargs based initilizer for {Runtime,Loading}Context."""
+
def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None:
"""Initialize."""
if kwargs:
@@ -71,8 +74,9 @@ def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None:
self.do_update = None # type: Optional[bool]
self.jobdefaults = None # type: Optional[CommentedMap]
self.doc_cache = True # type: bool
+ self.relax_path_checks = False # type: bool
- super(LoadingContext, self).__init__(kwargs)
+ super().__init__(kwargs)
def copy(self):
# type: () -> LoadingContext
@@ -89,9 +93,9 @@ def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None:
self.user_space_docker_cmd = "" # type: Optional[str]
self.secret_store = None # type: Optional[SecretStore]
self.no_read_only = False # type: bool
- self.custom_net = "" # type: Optional[str]
+ self.custom_net = None # type: Optional[str]
self.no_match_user = False # type: bool
- self.preserve_environment = "" # type: Optional[Iterable[str]]
+ self.preserve_environment = None # type: Optional[Iterable[str]]
self.preserve_entire_environment = False # type: bool
self.use_container = True # type: bool
self.force_docker_pull = False # type: bool
@@ -103,9 +107,9 @@ def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None:
self.pull_image = True # type: bool
self.rm_container = True # type: bool
self.move_outputs = "move" # type: str
+ self.streaming_allowed: bool = False
self.singularity = False # type: bool
- self.disable_net = False # type: bool
self.debug = False # type: bool
self.compute_checksum = True # type: bool
self.name = "" # type: str
@@ -148,7 +152,7 @@ def __init__(self, kwargs: Optional[Dict[str, Any]] = None) -> None:
self.mpi_config = MpiConfig() # type: MpiConfig
self.default_stdout = None # type: Optional[Union[IO[bytes], TextIO]]
self.default_stderr = None # type: Optional[Union[IO[bytes], TextIO]]
- super(RuntimeContext, self).__init__(kwargs)
+ super().__init__(kwargs)
if self.tmp_outdir_prefix == "":
self.tmp_outdir_prefix = self.tmpdir_prefix
diff --git a/cwltool/cwlrdf.py b/cwltool/cwlrdf.py
index 3df6dfdd64..f099f391fe 100644
--- a/cwltool/cwlrdf.py
+++ b/cwltool/cwlrdf.py
@@ -5,7 +5,7 @@
from rdflib import Graph
from ruamel.yaml.comments import CommentedMap
from schema_salad.jsonld_context import makerdf
-from schema_salad.ref_resolver import ContextType
+from schema_salad.utils import ContextType
from .cwlviewer import CWLViewer
from .process import Process
@@ -47,8 +47,8 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
for step, run, _ in qres:
stdout.write(
- u'"%s" [label="%s"]\n'
- % (lastpart(step), "%s (%s)" % (lastpart(step), lastpart(run)))
+ '"%s" [label="%s"]\n'
+ % (lastpart(step), f"{lastpart(step)} ({lastpart(run)})")
)
qres = g.query(
@@ -61,12 +61,12 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
)
for step, inp, source in qres:
- stdout.write(u'"%s" [shape=box]\n' % (lastpart(inp)))
+ stdout.write('"%s" [shape=box]\n' % (lastpart(inp)))
stdout.write(
- u'"%s" -> "%s" [label="%s"]\n' % (lastpart(source), lastpart(inp), "")
+ '"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(inp), "")
)
stdout.write(
- u'"%s" -> "%s" [label="%s"]\n' % (lastpart(inp), lastpart(step), "")
+ '"{}" -> "{}" [label="{}"]\n'.format(lastpart(inp), lastpart(step), "")
)
qres = g.query(
@@ -78,9 +78,9 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
)
for step, out in qres:
- stdout.write(u'"%s" [shape=box]\n' % (lastpart(out)))
+ stdout.write('"%s" [shape=box]\n' % (lastpart(out)))
stdout.write(
- u'"%s" -> "%s" [label="%s"]\n' % (lastpart(step), lastpart(out), "")
+ '"{}" -> "{}" [label="{}"]\n'.format(lastpart(step), lastpart(out), "")
)
qres = g.query(
@@ -92,9 +92,9 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
)
for out, source in qres:
- stdout.write(u'"%s" [shape=octagon]\n' % (lastpart(out)))
+ stdout.write('"%s" [shape=octagon]\n' % (lastpart(out)))
stdout.write(
- u'"%s" -> "%s" [label="%s"]\n' % (lastpart(source), lastpart(out), "")
+ '"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(out), "")
)
qres = g.query(
@@ -106,7 +106,7 @@ def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
)
for (inp,) in qres:
- stdout.write(u'"%s" [shape=octagon]\n' % (lastpart(inp)))
+ stdout.write('"%s" [shape=octagon]\n' % (lastpart(inp)))
def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
@@ -149,9 +149,7 @@ def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> Non
if wf in subworkflows:
if wf not in dotname:
dotname[wf] = "cluster_" + lastpart(wf)
- stdout.write(
- u'subgraph "%s" { label="%s"\n' % (dotname[wf], lastpart(wf))
- )
+ stdout.write(f'subgraph "{dotname[wf]}" {{ label="{lastpart(wf)}"\n')
currentwf = wf
clusternode[wf] = step
else:
@@ -159,7 +157,7 @@ def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> Non
if str(runtype) != "https://w3id.org/cwl/cwl#Workflow":
stdout.write(
- u'"%s" [label="%s"]\n'
+ '"%s" [label="%s"]\n'
% (dotname[step], urllib.parse.urldefrag(str(step))[1])
)
@@ -182,12 +180,12 @@ def dot_without_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> Non
for src, sink, srcrun, sinkrun in qres:
attr = ""
if srcrun in clusternode:
- attr += u'ltail="%s"' % dotname[srcrun]
+ attr += 'ltail="%s"' % dotname[srcrun]
src = clusternode[srcrun]
if sinkrun in clusternode:
- attr += u' lhead="%s"' % dotname[sinkrun]
+ attr += ' lhead="%s"' % dotname[sinkrun]
sink = clusternode[sinkrun]
- stdout.write(u'"%s" -> "%s" [%s]\n' % (dotname[src], dotname[sink], attr))
+ stdout.write(f'"{dotname[src]}" -> "{dotname[sink]}" [{attr}]\n')
def printdot(
diff --git a/cwltool/docker.py b/cwltool/docker.py
index 38f2dd87da..e5e2ae45b3 100644
--- a/cwltool/docker.py
+++ b/cwltool/docker.py
@@ -8,8 +8,7 @@
import subprocess # nosec
import sys
import threading
-from distutils import spawn
-from io import StringIO, open # pylint: disable=redefined-builtin
+from io import StringIO # pylint: disable=redefined-builtin
from typing import Callable, Dict, List, MutableMapping, Optional, Set, Tuple, cast
import requests
@@ -21,13 +20,7 @@
from .job import ContainerCommandLineJob
from .loghandler import _logger
from .pathmapper import MapperEnt, PathMapper
-from .utils import (
- CWLObjectType,
- create_tmp_dir,
- docker_windows_path_adjust,
- ensure_writable,
- onWindows,
-)
+from .utils import CWLObjectType, create_tmp_dir, ensure_writable
_IMAGES = set() # type: Set[str]
_IMAGES_LOCK = threading.Lock()
@@ -62,14 +55,10 @@ def _get_docker_machine_mounts() -> List[str]:
def _check_docker_machine_path(path: Optional[str]) -> None:
if path is None:
return
- if onWindows():
- path = path.lower()
mounts = _get_docker_machine_mounts()
found = False
for mount in mounts:
- if onWindows():
- mount = mount.lower()
if path.startswith(mount):
found = True
break
@@ -99,9 +88,7 @@ def __init__(
name: str,
) -> None:
"""Initialize a command line builder using the Docker software container engine."""
- super(DockerCommandLineJob, self).__init__(
- builder, joborder, make_path_mapper, requirements, hints, name
- )
+ super().__init__(builder, joborder, make_path_mapper, requirements, hints, name)
@staticmethod
def get_image(
@@ -237,7 +224,7 @@ def get_from_requirements(
force_pull: bool,
tmp_outdir_prefix: str,
) -> Optional[str]:
- if not spawn.find_executable("docker"):
+ if not shutil.which("docker"):
raise WorkflowException("docker executable is not available")
if self.get_image(
@@ -261,7 +248,7 @@ def append_volume(
output = StringIO()
csv.writer(output).writerow(options)
mount_arg = output.getvalue().strip()
- runtime.append("--mount={}".format(mount_arg))
+ runtime.append(f"--mount={mount_arg}")
# Unlike "--volume", "--mount" will fail if the volume doesn't already exist.
if not os.path.exists(source):
os.makedirs(source)
@@ -271,7 +258,7 @@ def add_file_or_directory_volume(
) -> None:
"""Append volume a file/dir mapping to the runtime option list."""
if not volume.resolved.startswith("_:"):
- _check_docker_machine_path(docker_windows_path_adjust(volume.resolved))
+ _check_docker_machine_path(volume.resolved)
self.append_volume(runtime, volume.resolved, volume.target)
def add_writable_file_volume(
@@ -331,6 +318,15 @@ def add_writable_directory_volume(
shutil.copytree(volume.resolved, host_outdir_tgt)
ensure_writable(host_outdir_tgt or new_dir)
+ def _required_env(self) -> Dict[str, str]:
+ # spec currently says "HOME must be set to the designated output
+ # directory." but spec might change to designated temp directory.
+ # runtime.append("--env=HOME=/tmp")
+ return {
+ "TMPDIR": self.CONTAINER_TMPDIR,
+ "HOME": self.builder.outdir,
+ }
+
def create_runtime(
self, env: MutableMapping[str, str], runtimeContext: RuntimeContext
) -> Tuple[List[str], Optional[str]]:
@@ -348,9 +344,8 @@ def create_runtime(
self.append_volume(
runtime, os.path.realpath(self.outdir), self.builder.outdir, writable=True
)
- tmpdir = "/tmp" # nosec
self.append_volume(
- runtime, os.path.realpath(self.tmpdir), tmpdir, writable=True
+ runtime, os.path.realpath(self.tmpdir), self.CONTAINER_TMPDIR, writable=True
)
self.add_volumes(
self.pathmapper,
@@ -372,9 +367,7 @@ def create_runtime(
runtime = [x.replace(":ro", "") for x in runtime]
runtime = [x.replace(":rw", "") for x in runtime]
- runtime.append(
- "--workdir=%s" % (docker_windows_path_adjust(self.builder.outdir))
- )
+ runtime.append("--workdir=%s" % (self.builder.outdir))
if not user_space_docker_cmd:
if not runtimeContext.no_read_only:
@@ -382,7 +375,7 @@ def create_runtime(
if self.networkaccess:
if runtimeContext.custom_net:
- runtime.append("--net={0}".format(runtimeContext.custom_net))
+ runtime.append(f"--net={runtimeContext.custom_net}")
else:
runtime.append("--net=none")
@@ -390,9 +383,7 @@ def create_runtime(
runtime.append("--log-driver=none")
euid, egid = docker_vm_id()
- if not onWindows():
- # MS Windows does not have getuid() or geteuid() functions
- euid, egid = euid or os.geteuid(), egid or os.getgid()
+ euid, egid = euid or os.geteuid(), egid or os.getgid()
if runtimeContext.no_match_user is False and (
euid is not None and egid is not None
@@ -402,13 +393,6 @@ def create_runtime(
if runtimeContext.rm_container:
runtime.append("--rm")
- runtime.append("--env=TMPDIR=/tmp")
-
- # spec currently says "HOME must be set to the designated output
- # directory." but spec might change to designated temp directory.
- # runtime.append("--env=HOME=/tmp")
- runtime.append("--env=HOME=%s" % self.builder.outdir)
-
cidfile_path = None # type: Optional[str]
# add parameters to docker to write a container ID file
if runtimeContext.user_space_docker_cmd is None:
@@ -437,7 +421,7 @@ def create_runtime(
cidfile_path = os.path.join(cidfile_dir, cidfile_name)
runtime.append("--cidfile=%s" % cidfile_path)
for key, value in self.environment.items():
- runtime.append("--env=%s=%s" % (key, value))
+ runtime.append(f"--env={key}={value}")
if runtimeContext.strict_memory_limit and not user_space_docker_cmd:
ram = self.builder.resources["ram"]
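The net effect of the `_required_env()` refactor above: TMPDIR and HOME are no longer hard-coded `--env` appends in `create_runtime()`, but flow through `self.environment` and are emitted alongside every other variable. A minimal sketch of that flow, using stand-in names rather than the real classes:

    from typing import Dict, List

    CONTAINER_TMPDIR = "/tmp"  # mirrors ContainerCommandLineJob.CONTAINER_TMPDIR

    def required_env(outdir: str) -> Dict[str, str]:
        # Mirrors DockerCommandLineJob._required_env() above.
        return {"TMPDIR": CONTAINER_TMPDIR, "HOME": outdir}

    runtime: List[str] = ["docker", "run", "-i"]
    for key, value in required_env("/outdir").items():
        runtime.append(f"--env={key}={value}")

    print(runtime)
    # ['docker', 'run', '-i', '--env=TMPDIR=/tmp', '--env=HOME=/outdir']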
diff --git a/cwltool/env_to_stdout.py b/cwltool/env_to_stdout.py
new file mode 100644
index 0000000000..a05cd82302
--- /dev/null
+++ b/cwltool/env_to_stdout.py
@@ -0,0 +1,33 @@
+r"""Python script that acts like (GNU coreutils) env -0.
+
+When run as a script, it prints the environment as
+`(VARNAME=value\0)*`.
+
+Ideally we would just use `env -0`, because Python (thanks to PEPs 538
+and 540) may itself set up to two environment variables to better
+handle Unicode-locale interactions. However, BSD-family implementations
+of `env` do not all support the `-0` flag, so we supply this script,
+which produces equivalent output.
+"""
+
+import os
+from typing import Dict
+
+
+def deserialize_env(data: str) -> Dict[str, str]:
+ """Deserialize the output of `env -0` to dictionary."""
+ ans = {}
+ for item in data.strip("\0").split("\0"):
+ key, val = item.split("=", 1)
+ ans[key] = val
+ return ans
+
+
+def main() -> None:
+ """Print the null-separated enviroment to stdout."""
+ for k, v in os.environ.items():
+ print(f"{k}={v}", end="\0")
+
+
+if __name__ == "__main__":
+ main()
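A quick round-trip check of the two halves of this module (a sketch; assumes cwltool is installed and importable). Exact equality with `os.environ` is deliberately not asserted because, per the docstring above, the child interpreter may add up to two locale-related variables:

    import os
    import subprocess
    import sys

    from cwltool.env_to_stdout import deserialize_env

    raw = subprocess.run(
        [sys.executable, "-m", "cwltool.env_to_stdout"],
        stdout=subprocess.PIPE,
        check=True,
    ).stdout.decode()
    env = deserialize_env(raw)
    assert env["PATH"] == os.environ["PATH"]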
diff --git a/cwltool/errors.py b/cwltool/errors.py
index 9df1236faf..f355236a08 100644
--- a/cwltool/errors.py
+++ b/cwltool/errors.py
@@ -4,3 +4,9 @@ class WorkflowException(Exception):
class UnsupportedRequirement(WorkflowException):
pass
+
+
+class ArgumentException(Exception):
+ """Mismatched command line arguments provided."""
+
+ pass
diff --git a/cwltool/executors.py b/cwltool/executors.py
index 53bf1ee44d..c04c78b0cb 100644
--- a/cwltool/executors.py
+++ b/cwltool/executors.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""Single and multi-threaded executors."""
import datetime
import functools
@@ -32,16 +31,16 @@
from .mutation import MutationManager
from .process import Process, cleanIntermediate, relocateOutputs
from .provenance_profile import ProvenanceProfile
+from .task_queue import TaskQueue
+from .update import ORIGINAL_CWLVERSION
from .utils import CWLObjectType, JobsType
from .workflow import Workflow
from .workflow_job import WorkflowJob, WorkflowJobStep
-from .task_queue import TaskQueue
-
TMPDIR_LOCK = Lock()
-class JobExecutor(object, metaclass=ABCMeta):
+class JobExecutor(metaclass=ABCMeta):
"""Abstract base job executor."""
def __init__(self) -> None:
@@ -90,9 +89,9 @@ def execute(
def check_for_abstract_op(tool: CWLObjectType) -> None:
if tool["class"] == "Operation":
- raise SourceLine(tool, "class", WorkflowException).makeError(
- "Workflow has unrunnable abstract Operation"
- )
+ raise SourceLine(
+ tool, "class", WorkflowException, runtime_context.debug
+ ).makeError("Workflow has unrunnable abstract Operation")
process.visit(check_for_abstract_op)
@@ -110,10 +109,7 @@ def check_for_abstract_op(tool: CWLObjectType) -> None:
job_reqs = None # type: Optional[List[CWLObjectType]]
if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
- if (
- process.metadata.get("http://commonwl.org/cwltool#original_cwlVersion")
- == "v1.0"
- ):
+ if process.metadata.get(ORIGINAL_CWLVERSION) == "v1.0":
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
@@ -128,10 +124,7 @@ def check_for_abstract_op(tool: CWLObjectType) -> None:
and "https://w3id.org/cwl/cwl#requirements"
in cast(CWLObjectType, process.metadata["cwl:defaults"])
):
- if (
- process.metadata.get("http://commonwl.org/cwltool#original_cwlVersion")
- == "v1.0"
- ):
+ if process.metadata.get(ORIGINAL_CWLVERSION) == "v1.0":
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
@@ -279,7 +272,7 @@ class MultithreadedJobExecutor(JobExecutor):
def __init__(self) -> None:
"""Initialize."""
- super(MultithreadedJobExecutor, self).__init__()
+ super().__init__()
self.exceptions = [] # type: List[WorkflowException]
self.pending_jobs = [] # type: List[JobsType]
self.pending_jobs_lock = threading.Lock()
@@ -333,10 +326,10 @@ def _runner(self, job, runtime_context, TMPDIR_LOCK):
)
job.run(runtime_context, TMPDIR_LOCK)
except WorkflowException as err:
- _logger.exception("Got workflow error: {}".format(err))
+ _logger.exception(f"Got workflow error: {err}")
self.exceptions.append(err)
except Exception as err: # pylint: disable=broad-except
- _logger.exception("Got workflow error: {}".format(err))
+ _logger.exception(f"Got workflow error: {err}")
self.exceptions.append(WorkflowException(str(err)))
finally:
if runtime_context.workflow_eval_lock:
diff --git a/cwltool/expression.py b/cwltool/expression.py
index 89786d9cc9..8057886b63 100644
--- a/cwltool/expression.py
+++ b/cwltool/expression.py
@@ -21,12 +21,7 @@
from .errors import WorkflowException
from .loghandler import _logger
from .sandboxjs import JavascriptException, default_timeout, execjs
-from .utils import (
- CWLObjectType,
- CWLOutputType,
- bytes2str_in_dicts,
- docker_windows_path_adjust,
-)
+from .utils import CWLObjectType, CWLOutputType, bytes2str_in_dicts
def jshead(engine_config: List[str], rootvars: CWLObjectType) -> str:
@@ -35,10 +30,7 @@ def jshead(engine_config: List[str], rootvars: CWLObjectType) -> str:
return "\n".join(
engine_config
- + [
- "var {} = {};".format(k, json_dumps(v, indent=4))
- for k, v in rootvars.items()
- ]
+ + [f"var {k} = {json_dumps(v, indent=4)};" for k, v in rootvars.items()]
)
@@ -47,9 +39,9 @@ def jshead(engine_config: List[str], rootvars: CWLObjectType) -> str:
seg_single = r"""\['([^']|\\')+'\]"""
seg_double = r"""\["([^"]|\\")+"\]"""
seg_index = r"""\[[0-9]+\]"""
-segments = r"(\.%s|%s|%s|%s)" % (seg_symbol, seg_single, seg_double, seg_index)
+segments = fr"(\.{seg_symbol}|{seg_single}|{seg_double}|{seg_index})"
segment_re = re.compile(segments, flags=re.UNICODE)
-param_str = r"\((%s)%s*\)$" % (seg_symbol, segments)
+param_str = fr"\(({seg_symbol}){segments}*\)$"
param_re = re.compile(param_str, flags=re.UNICODE)
@@ -142,7 +134,7 @@ def next_seg(
m = segment_re.match(remaining_string)
if not m:
return current_value
- next_segment_str = m.group(0)
+ next_segment_str = m.group(1)
key = None # type: Optional[Union[str, int]]
if next_segment_str[0] == ".":
@@ -154,7 +146,7 @@ def next_seg(
if (
isinstance(current_value, MutableSequence)
and key == "length"
- and not remaining_string[m.end(0) :]
+ and not remaining_string[m.end(1) :]
):
return len(current_value)
if not isinstance(current_value, MutableMapping):
@@ -163,9 +155,7 @@ def next_seg(
% (parsed_string, type(current_value).__name__, key)
)
if key not in current_value:
- raise WorkflowException(
- "%s does not contain key '%s'" % (parsed_string, key)
- )
+ raise WorkflowException(f"{parsed_string} does not contain key '{key}'")
else:
try:
key = int(next_segment_str[1:-1])
@@ -185,28 +175,22 @@ def next_seg(
try:
return next_seg(
parsed_string + remaining_string,
- remaining_string[m.end(0) :],
+ remaining_string[m.end(1) :],
cast(CWLOutputType, current_value[cast(str, key)]),
)
except KeyError:
- raise WorkflowException(
- "%s doesn't have property %s" % (parsed_string, key)
- )
+ raise WorkflowException(f"{parsed_string} doesn't have property {key}")
elif isinstance(current_value, list) and isinstance(key, int):
try:
return next_seg(
parsed_string + remaining_string,
- remaining_string[m.end(0) :],
+ remaining_string[m.end(1) :],
current_value[key],
)
except KeyError:
- raise WorkflowException(
- "%s doesn't have property %s" % (parsed_string, key)
- )
+ raise WorkflowException(f"{parsed_string} doesn't have property {key}")
else:
- raise WorkflowException(
- "%s doesn't have property %s" % (parsed_string, key)
- )
+ raise WorkflowException(f"{parsed_string} doesn't have property {key}")
else:
return current_value
@@ -233,7 +217,7 @@ def evaluator(
if first_symbol_end + 1 == len(ex) and first_symbol == "null":
return None
try:
- if obj.get(first_symbol) is None:
+ if first_symbol not in obj:
raise WorkflowException("%s is not defined" % first_symbol)
return next_seg(
@@ -271,7 +255,7 @@ def evaluator(
def _convert_dumper(string: str) -> str:
- return "{} + ".format(json.dumps(string))
+ return f"{json.dumps(string)} + "
def interpolate(
@@ -304,7 +288,7 @@ def interpolate(
w = scanner(scan)
while w:
if convert_to_expression:
- parts.append('"{}" + '.format(scan[0 : w[0]]))
+ parts.append(f'"{scan[0 : w[0]]}" + ')
else:
parts.append(scan[0 : w[0]])
@@ -358,7 +342,7 @@ def interpolate(
scan = scan[w[1] :]
w = scanner(scan)
if convert_to_expression:
- parts.append('"{}"'.format(scan))
+ parts.append(f'"{scan}"')
parts.append(";}")
else:
parts.append(scan)
@@ -386,8 +370,8 @@ def do_eval(
) -> Optional[CWLOutputType]:
runtime = cast(MutableMapping[str, Union[int, str, None]], copy.deepcopy(resources))
- runtime["tmpdir"] = docker_windows_path_adjust(tmpdir) if tmpdir else None
- runtime["outdir"] = docker_windows_path_adjust(outdir) if outdir else None
+ runtime["tmpdir"] = tmpdir if tmpdir else None
+ runtime["outdir"] = outdir if outdir else None
rootvars = cast(
CWLObjectType,
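To illustrate the parameter-reference grammar these regexes implement, here is a sketch; `seg_symbol` is assumed to be a plain `\w+` identifier segment, since its definition is not shown in this hunk:

    import re

    seg_symbol = r"\w+"  # assumption: bare identifier segment
    seg_single = r"""\['([^']|\\')+'\]"""
    seg_double = r"""\["([^"]|\\")+"\]"""
    seg_index = r"""\[[0-9]+\]"""
    segments = fr"(\.{seg_symbol}|{seg_single}|{seg_double}|{seg_index})"
    param_re = re.compile(fr"\(({seg_symbol}){segments}*\)$", flags=re.UNICODE)

    # Simple parameter references match and are evaluated without a
    # JavaScript engine; anything else falls through to full evaluation.
    for ref in ["(inputs.file1)", "(inputs['file 1'])", "(inputs.files[0])", "(1 + 1)"]:
        print(ref, bool(param_re.match(ref)))
    # (inputs.file1) True, (inputs['file 1']) True,
    # (inputs.files[0]) True, (1 + 1) False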
diff --git a/cwltool/factory.py b/cwltool/factory.py
index d2a4104574..5c888c5b12 100644
--- a/cwltool/factory.py
+++ b/cwltool/factory.py
@@ -12,12 +12,14 @@
class WorkflowStatus(Exception):
def __init__(self, out: Optional[CWLObjectType], status: str) -> None:
"""Signaling exception for the status of a Workflow."""
- super(WorkflowStatus, self).__init__("Completed %s" % status)
+ super().__init__("Completed %s" % status)
self.out = out
self.status = status
-class Callable(object):
+class Callable:
+ """Result of Factory.make()."""
+
def __init__(self, t: Process, factory: "Factory") -> None:
"""Initialize."""
self.t = t
@@ -34,14 +36,15 @@ def __call__(self, **kwargs):
return out
-class Factory(object):
+class Factory:
+ """Easy way to load a CWL document for execution."""
+
def __init__(
self,
executor: Optional[JobExecutor] = None,
loading_context: Optional[LoadingContext] = None,
runtime_context: Optional[RuntimeContext] = None,
) -> None:
- """Easy way to load a CWL document for execution."""
if executor is None:
executor = SingleJobExecutor()
self.executor = executor
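Typical use of `Factory`, along the lines of the cwltool README (`echo.cwl` and its `inp` input are placeholders for your own document):

    import cwltool.factory

    fac = cwltool.factory.Factory()
    echo = fac.make("echo.cwl")  # returns a Callable wrapping the Process
    result = echo(inp="foo")     # executes and returns the output object
    # e.g. result == {"out": "foo\n"}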
diff --git a/cwltool/job.py b/cwltool/job.py
index c4132d5508..fd74d937f6 100644
--- a/cwltool/job.py
+++ b/cwltool/job.py
@@ -5,6 +5,7 @@
import os
import re
import shutil
+import stat
import subprocess # nosec
import sys
import tempfile
@@ -17,8 +18,10 @@
from typing import (
IO,
Callable,
+ Dict,
Iterable,
List,
+ Mapping,
Match,
MutableMapping,
MutableSequence,
@@ -36,6 +39,7 @@
from schema_salad.utils import json_dump, json_dumps
from typing_extensions import TYPE_CHECKING
+from . import env_to_stdout, run_job
from .builder import Builder, HasReqsHints
from .context import RuntimeContext
from .errors import UnsupportedRequirement, WorkflowException
@@ -49,11 +53,9 @@
DirectoryType,
OutputCallbackType,
bytes2str_in_dicts,
- copytree_with_merge,
create_tmp_dir,
ensure_non_writable,
ensure_writable,
- onWindows,
processes_to_kill,
)
@@ -64,66 +66,7 @@
FORCE_SHELLED_POPEN = os.getenv("CWLTOOL_FORCE_SHELL_POPEN", "0") == "1"
SHELL_COMMAND_TEMPLATE = """#!/bin/bash
-python "run_job.py" "job.json"
-"""
-
-PYTHON_RUN_SCRIPT = """
-import json
-import os
-import sys
-if os.name == 'posix':
- try:
- import subprocess32 as subprocess # type: ignore
- except Exception:
- import subprocess
-else:
- import subprocess # type: ignore
-
-with open(sys.argv[1], "r") as f:
- popen_description = json.load(f)
- commands = popen_description["commands"]
- cwd = popen_description["cwd"]
- env = popen_description["env"]
- env["PATH"] = os.environ.get("PATH")
- stdin_path = popen_description["stdin_path"]
- stdout_path = popen_description["stdout_path"]
- stderr_path = popen_description["stderr_path"]
- if stdin_path is not None:
- stdin = open(stdin_path, "rb")
- else:
- stdin = subprocess.PIPE
- if stdout_path is not None:
- stdout = open(stdout_path, "wb")
- else:
- stdout = sys.stderr
- if stderr_path is not None:
- stderr = open(stderr_path, "wb")
- else:
- stderr = sys.stderr
- if os.name == 'nt':
- close_fds = False
- for key, value in env.items():
- env[key] = str(value)
- else:
- close_fds = True
- sp = subprocess.Popen(commands,
- shell=False,
- close_fds=close_fds,
- stdin=stdin,
- stdout=stdout,
- stderr=stderr,
- env=env,
- cwd=cwd)
- if sp.stdin:
- sp.stdin.close()
- rcode = sp.wait()
- if stdin is not subprocess.PIPE:
- stdin.close()
- if stdout is not sys.stderr:
- stdout.close()
- if stderr is not sys.stderr:
- stderr.close()
- sys.exit(rcode)
+python3 "run_job.py" "job.json"
"""
@@ -155,15 +98,7 @@ def relink_initialworkdir(
pass
elif os.path.isdir(host_outdir_tgt) and not vol.resolved.startswith("_:"):
shutil.rmtree(host_outdir_tgt)
- if onWindows():
- # If this becomes a big issue for someone then we could
- # refactor the code to process output from a running container
- # and avoid all the extra IO below
- if vol.type in ("File", "WritableFile"):
- shutil.copy(vol.resolved, host_outdir_tgt)
- elif vol.type in ("Directory", "WritableDirectory"):
- copytree_with_merge(vol.resolved, host_outdir_tgt)
- elif not vol.resolved.startswith("_:"):
+ if not vol.resolved.startswith("_:"):
try:
os.symlink(vol.resolved, host_outdir_tgt)
except FileExistsError:
@@ -188,7 +123,7 @@ def __init__(
name: str,
) -> None:
"""Initialize the job object."""
- super(JobBase, self).__init__()
+ super().__init__()
self.builder = builder
self.joborder = joborder
self.stdin = None # type: Optional[str]
@@ -241,13 +176,24 @@ def _setup(self, runtimeContext: RuntimeContext) -> None:
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
+ def is_streamable(file: str) -> bool:
+ if not runtimeContext.streaming_allowed:
+ return False
+ for inp in self.joborder.values():
+ if isinstance(inp, dict) and inp.get("location", None) == file:
+ return inp.get("streamable", False)
+ return False
+
for knownfile in self.pathmapper.files():
p = self.pathmapper.mapper(knownfile)
if p.type == "File" and not os.path.isfile(p[0]) and p.staged:
- raise WorkflowException(
- "Input file %s (at %s) not found or is not a regular "
- "file." % (knownfile, self.pathmapper.mapper(knownfile)[0])
- )
+ if not (
+ is_streamable(knownfile) and stat.S_ISFIFO(os.stat(p[0]).st_mode)
+ ):
+ raise WorkflowException(
+ "Input file %s (at %s) not found or is not a regular "
+ "file." % (knownfile, self.pathmapper.mapper(knownfile)[0])
+ )
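The block above also introduces streaming support: the missing-regular-file error is now skipped when the runtime context allows streaming (`streaming_allowed`) and the job order marks the input `streamable`, in which case a named pipe (FIFO) is accepted. A minimal sketch of the condition that then passes (the path is hypothetical):

    import os
    import stat
    import tempfile

    staging = tempfile.mkdtemp()
    pipe = os.path.join(staging, "input.fifo")  # hypothetical input
    os.mkfifo(pipe)
    assert stat.S_ISFIFO(os.stat(pipe).st_mode)  # satisfies the new check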
if "listing" in self.generatefiles:
runtimeContext = runtimeContext.copy()
@@ -278,7 +224,18 @@ def _execute(
runtimeContext: RuntimeContext,
monitor_function=None, # type: Optional[Callable[[subprocess.Popen[str]], None]]
) -> None:
+ """Execute the tool, either directly or via script.
+ Note: we are now at the point where self.environment is
+ ignored. The caller is responsible for correctly splitting that
+ into the runtime and env arguments.
+
+ `runtime` is the list of arguments to put at the start of the
+ command (e.g. docker run).
+
+        `env` is the environment to be set for running the resulting
+ command line.
+ """
scr = self.get_requirement("ShellCommandRequirement")[0]
shouldquote = needs_shell_quoting_re.search
@@ -335,9 +292,7 @@ def _execute(
if self.stdin is not None:
rmap = self.pathmapper.reversemap(self.stdin)
if rmap is None:
- raise WorkflowException(
- "{} missing from pathmapper".format(self.stdin)
- )
+ raise WorkflowException(f"{self.stdin} missing from pathmapper")
else:
stdin_path = rmap[1]
@@ -489,6 +444,61 @@ def _execute(
)
shutil.rmtree(self.tmpdir, True)
+ @abstractmethod
+ def _required_env(self) -> Dict[str, str]:
+ """Variables required by the CWL spec (HOME, TMPDIR, etc).
+
+ Note that with containers, the paths will (likely) be those from
+ inside.
+ """
+ pass
+
+ def _preserve_environment_on_containers_warning(
+        self, varnames: Optional[Iterable[str]] = None
+ ) -> None:
+ """When running in a container, issue a warning."""
+ # By default, don't do anything; ContainerCommandLineJob below
+ # will issue a warning.
+ pass
+
+ def prepare_environment(
+ self, runtimeContext: RuntimeContext, envVarReq: Mapping[str, str]
+ ) -> None:
+ """Set up environment variables.
+
+ Here we prepare the environment for the job, based on any
+ preserved variables and `EnvVarRequirement`. Later, changes due
+ to `MPIRequirement`, `Secrets`, or `SoftwareRequirement` are
+ applied (in that order).
+ """
+ # Start empty
+ env: Dict[str, str] = {}
+
+ # Preserve any env vars
+ if runtimeContext.preserve_entire_environment:
+ self._preserve_environment_on_containers_warning()
+ env.update(os.environ)
+ elif runtimeContext.preserve_environment:
+ self._preserve_environment_on_containers_warning(
+ runtimeContext.preserve_environment
+ )
+ for key in runtimeContext.preserve_environment:
+ try:
+ env[key] = os.environ[key]
+ except KeyError:
+ _logger.warning(
+ f"Attempting to preserve environment variable '{key}' which is not present"
+ )
+
+ # Set required env vars
+ env.update(self._required_env())
+
+ # Apply EnvVarRequirement
+ env.update(envVarReq)
+
+ # Set on ourselves
+ self.environment = env
+
def process_monitor(self, sproc): # type: (subprocess.Popen[str]) -> None
monitor = psutil.Process(sproc.pid)
# Value must be list rather than integer to utilise pass-by-reference in python
@@ -498,9 +508,9 @@ def get_tree_mem_usage(memory_usage: MutableSequence[Optional[int]]) -> None:
children = monitor.children()
rss = monitor.memory_info().rss
while len(children):
- rss += sum([process.memory_info().rss for process in children])
+ rss += sum(process.memory_info().rss for process in children)
children = list(
- itertools.chain(*[process.children() for process in children])
+ itertools.chain(*(process.children() for process in children))
)
if memory_usage[0] is None or rss > memory_usage[0]:
memory_usage[0] = rss
@@ -539,26 +549,6 @@ def run(
self._setup(runtimeContext)
- env = self.environment
- vars_to_preserve = runtimeContext.preserve_environment
- if runtimeContext.preserve_entire_environment is not False:
- vars_to_preserve = os.environ
- if vars_to_preserve:
- for key, value in os.environ.items():
- if key in vars_to_preserve and key not in env:
- # On Windows, subprocess env can't handle unicode.
- env[key] = str(value) if onWindows() else value
- env["HOME"] = str(self.outdir) if onWindows() else self.outdir
- env["TMPDIR"] = str(self.tmpdir) if onWindows() else self.tmpdir
- if "PATH" not in env:
- env["PATH"] = str(os.environ["PATH"]) if onWindows() else os.environ["PATH"]
- if "SYSTEMROOT" not in env and "SYSTEMROOT" in os.environ:
- env["SYSTEMROOT"] = (
- str(os.environ["SYSTEMROOT"])
- if onWindows()
- else os.environ["SYSTEMROOT"]
- )
-
stage_files(
self.pathmapper,
ignore_writable=True,
@@ -581,7 +571,16 @@ def run(
monitor_function = functools.partial(self.process_monitor)
- self._execute([], env, runtimeContext, monitor_function)
+ self._execute([], self.environment, runtimeContext, monitor_function)
+
+ def _required_env(self) -> Dict[str, str]:
+ env = {}
+ env["HOME"] = self.outdir
+ env["TMPDIR"] = self.tmpdir
+ env["PATH"] = os.environ["PATH"]
+ if "SYSTEMROOT" in os.environ:
+ env["SYSTEMROOT"] = os.environ["SYSTEMROOT"]
+ return env
CONTROL_CODE_RE = r"\x1b\[[0-9;]*[a-zA-Z]"
@@ -590,6 +589,8 @@ def run(
class ContainerCommandLineJob(JobBase, metaclass=ABCMeta):
"""Commandline job using containers."""
+ CONTAINER_TMPDIR: str = "/tmp" # nosec
+
@abstractmethod
def get_from_requirements(
self,
@@ -641,6 +642,19 @@ def add_writable_directory_volume(
) -> None:
"""Append a writable directory mapping to the runtime option list."""
+ def _preserve_environment_on_containers_warning(
+ self, varnames: Optional[Iterable[str]] = None
+ ) -> None:
+ """When running in a container, issue a warning."""
+ if varnames is None:
+ flags = "--preserve-entire-environment"
+ else:
+ flags = "--preserve-environment={" + ", ".join(varnames) + "}"
+
+ _logger.warning(
+ f"You have specified `{flags}` while running a container which will override variables set in the container. This may break the container, be non-portable, and/or affect reproducibility."
+ )
+
def create_file_and_add_volume(
self,
runtime: List[str],
@@ -715,6 +729,7 @@ def run(
runtimeContext: RuntimeContext,
tmpdir_lock: Optional[threading.Lock] = None,
) -> None:
+ debug = runtimeContext.debug
if tmpdir_lock:
with tmpdir_lock:
if not os.path.exists(self.tmpdir):
@@ -726,7 +741,6 @@ def run(
(docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
self.prov_obj = runtimeContext.prov_obj
img_id = None
- env = cast(MutableMapping[str, str], os.environ)
user_space_docker_cmd = runtimeContext.user_space_docker_cmd
if docker_req is not None and user_space_docker_cmd:
# For user-space docker implementations, a local image name or ID
@@ -740,22 +754,18 @@ def run(
try:
subprocess.check_call(cmd, stdout=sys.stderr) # nosec
except OSError:
- raise WorkflowException(
- SourceLine(docker_req).makeError(
- "Either Docker container {} is not available with "
- "user space docker implementation {} or {} is missing "
- "or broken.".format(
- img_id, user_space_docker_cmd, user_space_docker_cmd
- )
- )
+ raise SourceLine(
+ docker_req, None, WorkflowException, debug
+ ).makeError(
+ f"Either Docker container {img_id} is not available with "
+ f"user space docker implementation {user_space_docker_cmd} "
+ f" or {user_space_docker_cmd} is missing or broken."
)
else:
- raise WorkflowException(
- SourceLine(docker_req).makeError(
- "Docker image must be specified as 'dockerImageId' or "
- "'dockerPull' when using user space implementations of "
- "Docker"
- )
+ raise SourceLine(docker_req, None, WorkflowException, debug).makeError(
+ "Docker image must be specified as 'dockerImageId' or "
+ "'dockerPull' when using user space implementations of "
+ "Docker"
)
else:
try:
@@ -807,7 +817,9 @@ def run(
_logger.debug("%s error", container, exc_info=True)
if docker_is_req:
raise UnsupportedRequirement(
- "%s is required to run this tool: %s" % (container, str(err))
+ "{} is required to run this tool: {}".format(
+ container, str(err)
+ )
) from err
else:
raise WorkflowException(
@@ -818,7 +830,11 @@ def run(
)
self._setup(runtimeContext)
+
+        # Copy, as we don't want to modify our env
+ env = dict(os.environ)
(runtime, cidfile) = self.create_runtime(env, runtimeContext)
+
runtime.append(str(img_id))
monitor_function = None
if cidfile:
@@ -880,7 +896,7 @@ def docker_monitor(
return
max_mem_percent = 0 # type: float
mem_percent = 0 # type: float
- with open(stats_file_name, mode="r") as stats:
+ with open(stats_file_name) as stats:
while True:
line = stats.readline()
if not line:
@@ -907,7 +923,7 @@ def _job_popen(
stdin_path: Optional[str],
stdout_path: Optional[str],
stderr_path: Optional[str],
- env: MutableMapping[str, str],
+ env: Mapping[str, str],
cwd: str,
make_job_dir: Callable[[], str],
job_script_contents: Optional[str] = None,
@@ -939,7 +955,7 @@ def _job_popen(
sproc = subprocess.Popen(
commands,
shell=False, # nosec
- close_fds=not onWindows(),
+ close_fds=True,
stdin=stdin,
stdout=stdout,
stderr=stderr,
@@ -990,15 +1006,10 @@ def terminate(): # type: () -> None
if job_script_contents is None:
job_script_contents = SHELL_COMMAND_TEMPLATE
- env_copy = {}
- key = None # type: Optional[str]
- for key in env:
- env_copy[key] = env[key]
-
job_description = {
"commands": commands,
"cwd": cwd,
- "env": env_copy,
+ "env": env,
"stdout_path": stdout_path,
"stderr_path": stderr_path,
"stdin_path": stdin_path,
@@ -1013,9 +1024,13 @@ def terminate(): # type: () -> None
job_script = os.path.join(job_dir, "run_job.bash")
with open(job_script, "wb") as _:
_.write(job_script_contents.encode("utf-8"))
+
job_run = os.path.join(job_dir, "run_job.py")
- with open(job_run, "wb") as _:
- _.write(PYTHON_RUN_SCRIPT.encode("utf-8"))
+ shutil.copyfile(run_job.__file__, job_run)
+
+ env_getter = os.path.join(job_dir, "env_to_stdout.py")
+ shutil.copyfile(env_to_stdout.__file__, env_getter)
+
sproc = subprocess.Popen( # nosec
["bash", job_script.encode("utf-8")],
shell=False, # nosec
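The layering that `JobBase.prepare_environment()` above implements can be condensed to a few lines (a sketch with stand-in values, not the real API): preserved variables go in first, then the spec-required ones, then `EnvVarRequirement`, so later layers win on conflict.

    import os
    from typing import Dict, Mapping

    def layered_env(
        preserve_entire: bool,
        required: Dict[str, str],
        env_var_req: Mapping[str, str],
    ) -> Dict[str, str]:
        env: Dict[str, str] = {}
        if preserve_entire:        # --preserve-entire-environment
            env.update(os.environ)
        env.update(required)       # _required_env(): HOME, TMPDIR, PATH, ...
        env.update(env_var_req)    # EnvVarRequirement has the last word
        return env

    env = layered_env(True, {"HOME": "/out", "TMPDIR": "/tmp"}, {"HOME": "/custom"})
    assert env["HOME"] == "/custom"  # EnvVarRequirement wins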
diff --git a/cwltool/load_tool.py b/cwltool/load_tool.py
index ed6cde383f..2361ef6d99 100644
--- a/cwltool/load_tool.py
+++ b/cwltool/load_tool.py
@@ -31,7 +31,7 @@
json_dumps,
)
-from . import process, update
+from . import CWL_CONTENT_TYPES, process, update
from .context import LoadingContext
from .errors import WorkflowException
from .loghandler import _logger
@@ -127,7 +127,10 @@ def fetch_document(
resolver=loadingContext.resolver,
document_loader=loadingContext.loader,
)
- workflowobj = cast(CommentedMap, loadingContext.loader.fetch(fileuri))
+ workflowobj = cast(
+ CommentedMap,
+ loadingContext.loader.fetch(fileuri, content_types=CWL_CONTENT_TYPES),
+ )
return loadingContext, workflowobj, uri
if isinstance(argsworkflow, MutableMapping):
uri = (
@@ -164,7 +167,7 @@ def _convert_stdstreams_to_files(
):
if not isinstance(out, CommentedMap):
raise ValidationException(
- "Output '{}' is not a valid OutputParameter.".format(out)
+ f"Output '{out}' is not a valid OutputParameter."
)
for streamtype in ["stdout", "stderr"]:
if out.get("type") == streamtype:
@@ -309,9 +312,11 @@ def resolve_and_validate_document(
)
if not isinstance(cwlVersion, str):
- with SourceLine(workflowobj, "cwlVersion", ValidationException):
+ with SourceLine(
+ workflowobj, "cwlVersion", ValidationException, loadingContext.debug
+ ):
raise ValidationException(
- "'cwlVersion' must be a string, got {}".format(type(cwlVersion))
+ f"'cwlVersion' must be a string, got {type(cwlVersion)}"
)
# strip out version
cwlVersion = re.sub(r"^(?:cwl:|https://w3id.org/cwl/cwl#)", "", cwlVersion)
diff --git a/cwltool/main.py b/cwltool/main.py
index 78821028b7..5a5494ec4a 100755
--- a/cwltool/main.py
+++ b/cwltool/main.py
@@ -8,9 +8,11 @@
import logging
import os
import signal
+import subprocess # nosec
import sys
import time
import urllib
+import warnings
from codecs import StreamWriter, getwriter
from collections.abc import MutableMapping, MutableSequence
from typing import (
@@ -33,25 +35,20 @@
import argcomplete
import coloredlogs
import pkg_resources # part of setuptools
-from ruamel import yaml
+import ruamel.yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
+from ruamel.yaml.main import YAML
from schema_salad.exceptions import ValidationException
-from schema_salad.ref_resolver import (
- ContextType,
- FetcherCallableType,
- Loader,
- file_uri,
- uri_file_path,
-)
+from schema_salad.ref_resolver import Loader, file_uri, uri_file_path
from schema_salad.sourceline import strip_dup_lineno
-from schema_salad.utils import json_dumps
+from schema_salad.utils import ContextType, FetcherCallableType, json_dumps, yaml_no_ts
-from . import command_line_tool, workflow
+from . import CWL_CONTENT_TYPES, workflow
from .argparser import arg_parser, generate_parser, get_default_args
from .builder import HasReqsHints
from .context import LoadingContext, RuntimeContext, getdefault
from .cwlrdf import printdot, printrdf
-from .errors import UnsupportedRequirement, WorkflowException
+from .errors import ArgumentException, UnsupportedRequirement, WorkflowException
from .executors import JobExecutor, MultithreadedJobExecutor, SingleJobExecutor
from .load_tool import (
default_loader,
@@ -77,7 +74,7 @@
use_standard_schema,
)
from .procgenerator import ProcessGenerator
-from .provenance import ResearchObject
+from .provenance import ResearchObject, WritableBagFile
from .resolver import ga4gh_tool_registries, tool_resolver
from .secrets import SecretStore
from .software_requirements import (
@@ -85,7 +82,7 @@
get_container_from_software_requirements,
)
from .stdfsaccess import StdFsAccess
-from .subgraph import get_subgraph
+from .subgraph import get_process, get_step, get_subgraph
from .update import ALLUPDATES, UPDATES
from .utils import (
DEFAULT_TMP_PREFIX,
@@ -94,12 +91,10 @@
CWLOutputType,
adjustDirObjs,
normalizeFilesDirs,
- onWindows,
processes_to_kill,
trim_listing,
versionstring,
visit_class,
- windows_default_container_id,
)
from .workflow import Workflow
@@ -118,7 +113,22 @@ def _terminate_processes() -> None:
# It's possible that another thread will spawn a new task while
# we're executing, so it's not safe to use a for loop here.
while processes_to_kill:
- processes_to_kill.popleft().kill()
+ process = processes_to_kill.popleft()
+ cidfile = [
+ str(arg).split("=")[1] for arg in process.args if "--cidfile" in str(arg)
+ ]
+ if cidfile:
+ try:
+ with open(cidfile[0]) as inp_stream:
+ p = subprocess.Popen( # nosec
+ ["docker", "kill", inp_stream.read()], shell=False # nosec
+ )
+ try:
+ p.wait(timeout=10)
+ except subprocess.TimeoutExpired:
+ p.kill()
+ except FileNotFoundError:
+ pass
def _signal_handler(signum: int, _: Any) -> None:
@@ -149,10 +159,10 @@ def generate_example_input(
"float": 0.1,
"double": 0.1,
"string": "a_string",
- "File": yaml.comments.CommentedMap(
+ "File": ruamel.yaml.comments.CommentedMap(
[("class", "File"), ("path", "a/file/path")]
),
- "Directory": yaml.comments.CommentedMap(
+ "Directory": ruamel.yaml.comments.CommentedMap(
[("class", "Directory"), ("path", "a/directory/path")]
),
} # type: CWLObjectType
@@ -165,7 +175,7 @@ def generate_example_input(
example, comment = generate_example_input(inptype[0], default)
if optional:
if comment:
- comment = "{} (optional)".format(comment)
+ comment = f"{comment} (optional)"
else:
comment = "optional"
else:
@@ -184,7 +194,7 @@ def generate_example_input(
# array of just an enum then list all the options
example = first_item["symbols"]
if "name" in first_item:
- comment = u'array of type "{}".'.format(first_item["name"])
+ comment = 'array of type "{}".'.format(first_item["name"])
else:
value, comment = generate_example_input(inptype["items"], None)
comment = "array of " + comment
@@ -204,27 +214,27 @@ def generate_example_input(
example = symbols[0]
else:
example = "{}_enum_value".format(inptype.get("name", "valid"))
- comment = u'enum; valid values: "{}"'.format('", "'.join(symbols))
+ comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
elif inptype["type"] == "record":
- example = yaml.comments.CommentedMap()
+ example = ruamel.yaml.comments.CommentedMap()
if "name" in inptype:
- comment = u'"{}" record type.'.format(inptype["name"])
+ comment = '"{}" record type.'.format(inptype["name"])
for field in cast(List[CWLObjectType], inptype["fields"]):
value, f_comment = generate_example_input(field["type"], None)
example.insert(0, shortname(cast(str, field["name"])), value, f_comment)
elif "default" in inptype:
example = inptype["default"]
- comment = u'default value of type "{}".'.format(inptype["type"])
+ comment = 'default value of type "{}".'.format(inptype["type"])
else:
example = defaults.get(cast(str, inptype["type"]), str(inptype))
- comment = u'type "{}".'.format(inptype["type"])
+ comment = 'type "{}".'.format(inptype["type"])
else:
if not default:
example = defaults.get(str(inptype), str(inptype))
- comment = u'type "{}"'.format(inptype)
+ comment = f'type "{inptype}"'
else:
example = default
- comment = u'default value of type "{}".'.format(inptype)
+ comment = f'default value of type "{inptype}".'
return example, comment
@@ -294,7 +304,7 @@ def realize_input_schema(
def generate_input_template(tool: Process) -> CWLObjectType:
"""Generate an example input object for the given CWL process."""
- template = yaml.comments.CommentedMap()
+ template = ruamel.yaml.comments.CommentedMap()
for inp in realize_input_schema(tool.tool["inputs"], tool.schemaDefs):
name = shortname(cast(str, inp["id"]))
value, comment = generate_example_input(inp["type"], inp.get("default", None))
@@ -319,7 +329,8 @@ def load_job_order(
if len(args.job_order) == 1 and args.job_order[0][0] != "-":
job_order_file = args.job_order[0]
elif len(args.job_order) == 1 and args.job_order[0] == "-":
- job_order_object = yaml.main.round_trip_load(stdin)
+ yaml = yaml_no_ts()
+ job_order_object = yaml.load(stdin)
job_order_object, _ = loader.resolve_all(
job_order_object, file_uri(os.getcwd()) + "/"
)
@@ -334,7 +345,11 @@ def load_job_order(
if args.basedir
else os.path.abspath(os.path.dirname(job_order_file))
)
- job_order_object, _ = loader.resolve_ref(job_order_file, checklinks=False)
+ job_order_object, _ = loader.resolve_ref(
+ job_order_file,
+ checklinks=False,
+ content_types=CWL_CONTENT_TYPES,
+ )
if (
job_order_object is not None
@@ -428,7 +443,7 @@ def init_job_order(
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(
"Parsed job order from command line: %s",
- json_dumps(job_order_object, indent=4),
+ json_dumps(job_order_object, indent=4, default=str),
)
for inp in process.tool["inputs"]:
@@ -442,7 +457,7 @@ def init_job_order(
if job_order_object is None:
if process.tool["inputs"]:
if toolparser is not None:
- print("\nOptions for {} ".format(args.workflow))
+ print(f"\nOptions for {args.workflow} ")
toolparser.print_help()
_logger.error("")
_logger.error("Input object required, use --help for details")
@@ -531,7 +546,7 @@ def printdeps(
elif relative_deps == "cwd":
base = os.getcwd()
visit_class(deps, ("File", "Directory"), functools.partial(make_relative, base))
- stdout.write(json_dumps(deps, indent=4))
+ stdout.write(json_dumps(deps, indent=4, default=str))
def prov_deps(
@@ -593,9 +608,9 @@ def print_pack(
"""Return a CWL serialization of the CWL document in JSON."""
packed = pack(loadingContext, uri)
if len(cast(Sized, packed["$graph"])) > 1:
- return json_dumps(packed, indent=4)
+ return json_dumps(packed, indent=4, default=str)
return json_dumps(
- cast(MutableSequence[CWLObjectType], packed["$graph"])[0], indent=4
+ cast(MutableSequence[CWLObjectType], packed["$graph"])[0], indent=4, default=str
)
@@ -662,13 +677,15 @@ class ProvLogFormatter(logging.Formatter):
def __init__(self) -> None:
"""Use the default formatter with our custom formatstring."""
- super(ProvLogFormatter, self).__init__("[%(asctime)sZ] %(message)s")
+ super().__init__("[%(asctime)sZ] %(message)s")
def formatTime(
self, record: logging.LogRecord, datefmt: Optional[str] = None
) -> str:
- formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(record.created))
- with_msecs = "%s,%03f" % (formatted_time, record.msecs)
+ formatted_time = time.strftime(
+ "%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))
+ )
+ with_msecs = f"{formatted_time},{record.msecs:03f}"
return with_msecs
@@ -676,10 +693,10 @@ def setup_provenance(
args: argparse.Namespace,
argsl: List[str],
runtimeContext: RuntimeContext,
-) -> Optional[int]:
+) -> Union[io.TextIOWrapper, WritableBagFile]:
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
- return 1
+ raise ArgumentException()
ro = ResearchObject(
getdefault(runtimeContext.make_fs_access, StdFsAccess)(""),
temp_prefix_ro=args.tmpdir_prefix,
@@ -688,7 +705,7 @@ def setup_provenance(
)
runtimeContext.research_obj = ro
log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
- prov_log_handler = logging.StreamHandler(cast(IO[str], log_file_io))
+ prov_log_handler = logging.StreamHandler(log_file_io)
prov_log_handler.setFormatter(ProvLogFormatter())
_logger.addHandler(prov_log_handler)
@@ -697,7 +714,7 @@ def setup_provenance(
# Log cwltool command line options to provenance file
_logger.info("[cwltool] %s %s", sys.argv[0], " ".join(argsl))
_logger.debug("[cwltool] Arguments: %s", args)
- return None
+ return log_file_io
def setup_loadingContext(
@@ -739,13 +756,16 @@ def my_represent_none(
"""Force clean representation of 'null'."""
return self.represent_scalar("tag:yaml.org,2002:null", "null")
- yaml.representer.RoundTripRepresenter.add_representer(type(None), my_represent_none)
- yaml.main.round_trip_dump(
+ ruamel.yaml.representer.RoundTripRepresenter.add_representer(
+ type(None), my_represent_none
+ )
+ yaml = YAML()
+ yaml.default_flow_style = False
+ yaml.indent = 4
+ yaml.block_seq_indent = 2
+ yaml.dump(
generate_input_template(tool),
sys.stdout,
- default_flow_style=False,
- indent=4,
- block_seq_indent=2,
)
@@ -754,7 +774,7 @@ def choose_target(
tool: Process,
loadingContext: LoadingContext,
) -> Optional[Process]:
- """Walk the given Workflow and find the process that matches args.target."""
+ """Walk the Workflow, extract the subset matches all the args.targets."""
if loadingContext.loader is None:
raise Exception("loadingContext.loader cannot be None")
@@ -784,6 +804,75 @@ def choose_target(
return tool
+def choose_step(
+ args: argparse.Namespace,
+ tool: Process,
+ loadingContext: LoadingContext,
+) -> Optional[Process]:
+ """Walk the given Workflow and extract just args.single_step."""
+ if loadingContext.loader is None:
+ raise Exception("loadingContext.loader cannot be None")
+
+ if isinstance(tool, Workflow):
+ url = urllib.parse.urlparse(tool.tool["id"])
+ if url.fragment:
+ extracted = get_step(tool, tool.tool["id"] + "/" + args.single_step)
+ else:
+ extracted = get_step(
+ tool,
+ loadingContext.loader.fetcher.urljoin(
+ tool.tool["id"], "#" + args.single_step
+ ),
+ )
+ else:
+ _logger.error("Can only use --single-step on Workflows")
+ return None
+ if isinstance(loadingContext.loader.idx, MutableMapping):
+ loadingContext.loader.idx[extracted["id"]] = extracted
+ tool = make_tool(extracted["id"], loadingContext)
+ else:
+ raise Exception("Missing loadingContext.loader.idx!")
+
+ return tool
+
+
+def choose_process(
+ args: argparse.Namespace,
+ tool: Process,
+ loadingContext: LoadingContext,
+) -> Optional[Process]:
+ """Walk the given Workflow and extract just args.single_step."""
+ if loadingContext.loader is None:
+ raise Exception("loadingContext.loader cannot be None")
+
+ if isinstance(tool, Workflow):
+ url = urllib.parse.urlparse(tool.tool["id"])
+ if url.fragment:
+ extracted = get_process(
+ tool,
+ tool.tool["id"] + "/" + args.single_process,
+ loadingContext.loader.idx,
+ )
+ else:
+ extracted = get_process(
+ tool,
+ loadingContext.loader.fetcher.urljoin(
+ tool.tool["id"], "#" + args.single_process
+ ),
+ loadingContext.loader.idx,
+ )
+ else:
+ _logger.error("Can only use --single-process on Workflows")
+ return None
+ if isinstance(loadingContext.loader.idx, MutableMapping):
+ loadingContext.loader.idx[extracted["id"]] = extracted
+ tool = make_tool(extracted["id"], loadingContext)
+ else:
+ raise Exception("Missing loadingContext.loader.idx!")
+
+ return tool
+
+
def check_working_directories(
runtimeContext: RuntimeContext,
) -> Optional[int]:
@@ -869,13 +958,6 @@ def main(
else:
runtimeContext = runtimeContext.copy()
- # If on Windows platform, a default Docker Container is used if not
- # explicitely provided by user
- if onWindows() and not runtimeContext.default_container:
- # This docker image is a minimal alpine image with bash installed
- # (size 6 mb). source: https://github.com/frol/docker-alpine-bash
- runtimeContext.default_container = windows_default_container_id
-
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
@@ -901,8 +983,6 @@ def main(
_logger.error("CWL document required, no input file was provided")
parser.print_help()
return 1
- if args.relax_path_checks:
- command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
@@ -914,10 +994,13 @@ def main(
setup_schema(args, custom_schema_callback)
+ prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
if argsl is None:
raise Exception("argsl cannot be None")
- if setup_provenance(args, argsl, runtimeContext) is not None:
+ try:
+ prov_log_stream = setup_provenance(args, argsl, runtimeContext)
+ except ArgumentException:
return 1
loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)
@@ -967,7 +1050,7 @@ def main(
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
- processobj = cast(CommentedMap, processobj)
+ processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
if args.pack:
stdout.write(print_pack(loadingContext, uri))
return 0
@@ -981,7 +1064,11 @@ def main(
if args.print_pre:
stdout.write(
json_dumps(
- processobj, indent=4, sort_keys=True, separators=(",", ": ")
+ processobj,
+ indent=4,
+ sort_keys=True,
+ separators=(",", ": "),
+ default=str,
)
)
return 0
@@ -992,7 +1079,7 @@ def main(
return 0
if args.validate:
- print("{} is valid CWL.".format(args.workflow))
+ print(f"{args.workflow} is valid CWL.")
return 0
if args.print_rdf:
@@ -1023,12 +1110,30 @@ def main(
else:
tool = ctool
+ elif args.single_step:
+ ctool = choose_step(args, tool, loadingContext)
+ if ctool is None:
+ return 1
+ else:
+ tool = ctool
+
+ elif args.single_process:
+ ctool = choose_process(args, tool, loadingContext)
+ if ctool is None:
+ return 1
+ else:
+ tool = ctool
+
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
stdout.write(
json_dumps(
- tool.tool, indent=4, sort_keys=True, separators=(",", ": ")
+ tool.tool,
+ indent=4,
+ sort_keys=True,
+ separators=(",", ": "),
+ default=str,
)
)
return 0
@@ -1191,7 +1296,9 @@ def loc_to_path(obj: CWLObjectType) -> None:
if isinstance(out, str):
stdout.write(out)
else:
- stdout.write(json_dumps(out, indent=4, ensure_ascii=False))
+ stdout.write(
+ json_dumps(out, indent=4, ensure_ascii=False, default=str)
+ )
stdout.write("\n")
if hasattr(stdout, "flush"):
stdout.flush()
@@ -1258,7 +1365,10 @@ def loc_to_path(obj: CWLObjectType) -> None:
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
- prov_log_handler.stream.close()
+ if prov_log_stream:
+ prov_log_stream.close()
+            # Why not use prov_log_handler.stream? It is not part of the
+            # public API for logging.StreamHandler.
prov_log_handler.close()
research_obj.close(args.provenance)
@@ -1279,9 +1389,25 @@ def find_default_container(
return default_container
-def run(*args, **kwargs):
- # type: (*Any, **Any) -> None
+def windows_check() -> None:
+ """See if we are running on MS Windows and warn about the lack of support."""
+ if os.name == "nt":
+ warnings.warn(
+ "The CWL reference runner (cwltool) no longer supports running "
+ "CWL workflows natively on MS Windows as its previous MS Windows "
+ "support was incomplete and untested. Instead, please see "
+ "https://pypi.org/project/cwltool/#ms-windows-users "
+ "for instructions on running cwltool via "
+ "Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
+ "CWL documents, then you can ignore this warning, but please "
+ "consider migrating to https://pypi.org/project/cwl-utils/ "
+ "for your CWL document processing needs."
+ )
+
+
+def run(*args: Any, **kwargs: Any) -> None:
"""Run cwltool."""
+ windows_check()
signal.signal(signal.SIGTERM, _signal_handler)
try:
sys.exit(main(*args, **kwargs))
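For orientation, the extraction flags added above complement `--target`: `cwltool --single-step step1 wf.cwl job.json` runs just the named step (still wrapped in its Workflow context), while `cwltool --single-process step1 wf.cwl job.json` extracts and runs that step's underlying process directly. The workflow, step, and job file names here are placeholders.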
diff --git a/cwltool/mpi.py b/cwltool/mpi.py
index 3ec23d23ac..a660177962 100644
--- a/cwltool/mpi.py
+++ b/cwltool/mpi.py
@@ -4,7 +4,7 @@
import re
from typing import List, Mapping, MutableMapping, Optional, Type, TypeVar, Union
-from ruamel import yaml
+from schema_salad.utils import yaml_no_ts
MpiConfigT = TypeVar("MpiConfigT", bound="MpiConfig")
@@ -53,12 +53,13 @@ def load(cls: Type[MpiConfigT], config_file_name: str) -> MpiConfigT:
optional).
"""
with open(config_file_name) as cf:
- data = yaml.round_trip_load(cf)
+ yaml = yaml_no_ts()
+ data = yaml.load(cf)
try:
return cls(**data)
except TypeError as e:
unknown = set(data.keys()) - set(inspect.signature(cls).parameters)
- raise ValueError("Unknown key(s) in MPI configuration: {}".format(unknown))
+ raise ValueError(f"Unknown key(s) in MPI configuration: {unknown}")
def pass_through_env_vars(self, env: MutableMapping[str, str]) -> None:
"""Take the configured list of environment variables and pass them to the executed process."""
diff --git a/cwltool/mutation.py b/cwltool/mutation.py
index b1dc88a56c..65f894e167 100644
--- a/cwltool/mutation.py
+++ b/cwltool/mutation.py
@@ -4,12 +4,12 @@
from .errors import WorkflowException
from .utils import CWLObjectType
-MutationState = namedtuple("MutationTracker", ["generation", "readers", "stepname"])
+MutationState = namedtuple("MutationState", ["generation", "readers", "stepname"])
_generation = "http://commonwl.org/cwltool#generation"
-class MutationManager(object):
+class MutationManager:
"""Lock manager for checking correctness of in-place update of files.
Used to validate that in-place file updates happen sequentially, and that a
diff --git a/cwltool/pack.py b/cwltool/pack.py
index 92d864bc7b..93d0abb92f 100644
--- a/cwltool/pack.py
+++ b/cwltool/pack.py
@@ -15,12 +15,13 @@
)
from ruamel.yaml.comments import CommentedMap, CommentedSeq
-from schema_salad.ref_resolver import Loader, ResolveType, SubLoader
+from schema_salad.ref_resolver import Loader, SubLoader
+from schema_salad.utils import ResolveType
from .context import LoadingContext
from .load_tool import fetch_document, resolve_and_validate_document
from .process import shortname, uniquename
-from .update import ORDERED_VERSIONS, update
+from .update import ORDERED_VERSIONS, ORIGINAL_CWLVERSION, update
from .utils import CWLObjectType, CWLOutputType
LoadRefType = Callable[[Optional[str], str], ResolveType]
@@ -242,15 +243,20 @@ def rewrite_id(r: str, mainuri: str) -> None:
update_to_version,
)
- if "http://commonwl.org/cwltool#original_cwlVersion" in metadata:
- del metadata["http://commonwl.org/cwltool#original_cwlVersion"]
- if "http://commonwl.org/cwltool#original_cwlVersion" in dcr:
- del dcr["http://commonwl.org/cwltool#original_cwlVersion"]
+ if ORIGINAL_CWLVERSION in metadata:
+ del metadata[ORIGINAL_CWLVERSION]
+ if ORIGINAL_CWLVERSION in dcr:
+ del dcr[ORIGINAL_CWLVERSION]
if "$schemas" in metadata:
for s in metadata["$schemas"]:
schemas.add(s)
- if dcr.get("class") not in ("Workflow", "CommandLineTool", "ExpressionTool"):
+ if dcr.get("class") not in (
+ "Workflow",
+ "CommandLineTool",
+ "ExpressionTool",
+ "Operation",
+ ):
continue
dc = cast(Dict[str, Any], copy.deepcopy(dcr))
v = rewrite[r]
@@ -262,6 +268,8 @@ def rewrite_id(r: str, mainuri: str) -> None:
if schemas:
packed["$schemas"] = list(schemas)
+ if namespaces:
+ packed["$namespaces"] = namespaces
for r in list(rewrite.keys()):
v = rewrite[r]
@@ -270,14 +278,13 @@ def rewrite_id(r: str, mainuri: str) -> None:
import_embed(packed, set())
if len(packed["$graph"]) == 1:
- # duplicate 'cwlVersion' and $schemas inside $graph when there is only
- # a single item because we will print the contents inside '$graph'
- # rather than whole dict
+ # duplicate 'cwlVersion', '$schemas', and '$namespaces' inside '$graph'
+ # when there is only a single item because main.print_pack() will print
+        # the contents inside '$graph' rather than the whole dict in this case
packed["$graph"][0]["cwlVersion"] = packed["cwlVersion"]
if schemas:
packed["$graph"][0]["$schemas"] = list(schemas)
- # always include $namespaces in the #main
- if namespaces:
- packed["$graph"][0]["$namespaces"] = namespaces
+ if namespaces:
+ packed["$graph"][0]["$namespaces"] = namespaces
return packed
diff --git a/cwltool/pathmapper.py b/cwltool/pathmapper.py
index 6d836e6427..3c73df6960 100644
--- a/cwltool/pathmapper.py
+++ b/cwltool/pathmapper.py
@@ -12,14 +12,14 @@
from .loghandler import _logger
from .stdfsaccess import abspath
-from .utils import CWLObjectType, convert_pathsep_to_unix, dedup, downloadHttpFile
+from .utils import CWLObjectType, dedup, downloadHttpFile
MapperEnt = collections.namedtuple(
"MapperEnt", ["resolved", "target", "type", "staged"]
)
-class PathMapper(object):
+class PathMapper:
"""
Mapping of files from relative path provided in the file to a tuple.
@@ -95,11 +95,9 @@ def visit(
staged: bool = False,
) -> None:
stagedir = cast(Optional[str], obj.get("dirname")) or stagedir
- tgt = convert_pathsep_to_unix(
- os.path.join(
- stagedir,
- cast(str, obj["basename"]),
- )
+ tgt = os.path.join(
+ stagedir,
+ cast(str, obj["basename"]),
)
if obj["location"] in self._pathmap:
return
diff --git a/cwltool/process.py b/cwltool/process.py
index ea2655e5d3..9079e29b0c 100644
--- a/cwltool/process.py
+++ b/cwltool/process.py
@@ -12,7 +12,6 @@
import textwrap
import urllib
import uuid
-from io import open
from os import scandir
from typing import (
Any,
@@ -46,11 +45,11 @@
from schema_salad.schema import load_schema, make_avro_schema, make_valid_avro
from schema_salad.sourceline import SourceLine, strip_dup_lineno
from schema_salad.utils import convert_to_dict
-from schema_salad.validate import validate_ex
+from schema_salad.validate import avro_type_name, validate_ex
from typing_extensions import TYPE_CHECKING
from . import expression
-from .builder import Builder, HasReqsHints
+from .builder import INPUT_OBJ_VOCAB, Builder, HasReqsHints
from .context import LoadingContext, RuntimeContext, getdefault
from .errors import UnsupportedRequirement, WorkflowException
from .loghandler import _logger
@@ -58,7 +57,7 @@
from .pathmapper import MapperEnt, PathMapper
from .secrets import SecretStore
from .stdfsaccess import StdFsAccess
-from .update import INTERNAL_VERSION
+from .update import INTERNAL_VERSION, ORIGINAL_CWLVERSION
from .utils import (
CWLObjectType,
CWLOutputAtomType,
@@ -68,11 +67,9 @@
adjustDirObjs,
aslist,
cmp_like_py2,
- copytree_with_merge,
ensure_writable,
get_listing,
normalizeFilesDirs,
- onWindows,
random_outdir,
visit_class,
)
@@ -86,7 +83,7 @@ class LogAsDebugFilter(logging.Filter):
def __init__(self, name: str, parent: logging.Logger) -> None:
"""Initialize."""
name = str(name)
- super(LogAsDebugFilter, self).__init__(name)
+ super().__init__(name)
self.parent = parent
def filter(self, record: logging.LogRecord) -> bool:
@@ -196,23 +193,23 @@ def get_schema(
version = ".".join(version.split(".")[:-1])
for f in cwl_files:
try:
- res = resource_stream(__name__, "schemas/%s/%s" % (version, f))
+ res = resource_stream(__name__, f"schemas/{version}/{f}")
cache["https://w3id.org/cwl/" + f] = res.read().decode("UTF-8")
res.close()
- except IOError:
+ except OSError:
pass
for f in salad_files:
try:
res = resource_stream(
__name__,
- "schemas/{}/salad/schema_salad/metaschema/{}".format(version, f),
+ f"schemas/{version}/salad/schema_salad/metaschema/{f}",
)
cache[
"https://w3id.org/cwl/salad/schema_salad/metaschema/" + f
] = res.read().decode("UTF-8")
res.close()
- except IOError:
+ except OSError:
pass
if version in custom_schemas:
@@ -239,13 +236,14 @@ def checkRequirements(
) -> None:
if isinstance(rec, MutableMapping):
if "requirements" in rec:
+ debug = _logger.isEnabledFor(logging.DEBUG)
for i, entry in enumerate(
cast(MutableSequence[CWLObjectType], rec["requirements"])
):
- with SourceLine(rec["requirements"], i, UnsupportedRequirement):
+ with SourceLine(rec["requirements"], i, UnsupportedRequirement, debug):
if cast(str, entry["class"]) not in supported_process_requirements:
raise UnsupportedRequirement(
- "Unsupported requirement {}".format(entry["class"])
+ f"Unsupported requirement {entry['class']}."
)
for key in rec:
checkRequirements(rec[key], supported_process_requirements)
@@ -274,10 +272,10 @@ def stage_files(
# find first key that does not clash with an existing entry in targets
# start with entry.target + '_' + 2 and then keep incrementing the number till there is no clash
i = 2
- tgt = "%s_%s" % (entry.target, i)
+ tgt = f"{entry.target}_{i}"
while tgt in targets:
i += 1
- tgt = "%s_%s" % (entry.target, i)
+ tgt = f"{entry.target}_{i}"
targets[tgt] = pathmapper.update(
key, entry.resolved, tgt, entry.type, entry.staged
)
@@ -294,15 +292,7 @@ def stage_files(
os.makedirs(os.path.dirname(entry.target))
if entry.type in ("File", "Directory") and os.path.exists(entry.resolved):
if symlink: # Use symlink func if allowed
- if onWindows():
- if entry.type == "File":
- shutil.copy(entry.resolved, entry.target)
- elif entry.type == "Directory":
- if os.path.exists(entry.target) and os.path.isdir(entry.target):
- shutil.rmtree(entry.target)
- copytree_with_merge(entry.resolved, entry.target)
- else:
- os.symlink(entry.resolved, entry.target)
+ os.symlink(entry.resolved, entry.target)
elif stage_func is not None:
stage_func(entry.resolved, entry.target)
elif (
@@ -319,7 +309,7 @@ def stage_files(
os.makedirs(entry.target)
else:
shutil.copytree(entry.resolved, entry.target)
- ensure_writable(entry.target)
+ ensure_writable(entry.target, include_root=True)
elif entry.type == "CreateFile" or entry.type == "CreateWritableFile":
with open(entry.target, "wb") as new:
if secret_store is not None:
@@ -357,12 +347,10 @@ def _collectDirEntries(
yield obj
else:
for sub_obj in obj.values():
- for dir_entry in _collectDirEntries(sub_obj):
- yield dir_entry
+ yield from _collectDirEntries(sub_obj)
elif isinstance(obj, MutableSequence):
for sub_obj in obj:
- for dir_entry in _collectDirEntries(sub_obj):
- yield dir_entry
+ yield from _collectDirEntries(sub_obj)
def _relocate(src: str, dst: str) -> None:
if src == dst:
@@ -451,10 +439,9 @@ def fill_in_defaults(
job: CWLObjectType,
fsaccess: StdFsAccess,
) -> None:
+ debug = _logger.isEnabledFor(logging.DEBUG)
for e, inp in enumerate(inputs):
- with SourceLine(
- inputs, e, WorkflowException, _logger.isEnabledFor(logging.DEBUG)
- ):
+ with SourceLine(inputs, e, WorkflowException, debug):
fieldname = shortname(cast(str, inp["id"]))
if job.get(fieldname) is not None:
pass
@@ -470,30 +457,34 @@ def fill_in_defaults(
def avroize_type(
- field_type: Union[
- CWLObjectType, MutableSequence[CWLOutputType], CWLOutputType, None
- ],
+ field_type: Union[CWLObjectType, MutableSequence[Any], CWLOutputType, None],
name_prefix: str = "",
-) -> None:
+) -> Union[CWLObjectType, MutableSequence[Any], CWLOutputType, None]:
"""Add missing information to a type so that CWL types are valid."""
if isinstance(field_type, MutableSequence):
- for field in field_type:
- avroize_type(field, name_prefix)
+ for i, field in enumerate(field_type):
+ field_type[i] = avroize_type(field, name_prefix)
elif isinstance(field_type, MutableMapping):
if field_type["type"] in ("enum", "record"):
if "name" not in field_type:
field_type["name"] = name_prefix + str(uuid.uuid4())
if field_type["type"] == "record":
- avroize_type(
+ field_type["fields"] = avroize_type(
cast(MutableSequence[CWLOutputType], field_type["fields"]), name_prefix
)
- if field_type["type"] == "array":
- avroize_type(
+ elif field_type["type"] == "array":
+ field_type["items"] = avroize_type(
cast(MutableSequence[CWLOutputType], field_type["items"]), name_prefix
)
- if isinstance(field_type["type"], MutableSequence):
- for ctype in field_type["type"]:
- avroize_type(cast(CWLOutputType, ctype), name_prefix)
+ else:
+ field_type["type"] = avroize_type(
+ cast(CWLOutputType, field_type["type"]), name_prefix
+ )
+ elif field_type == "File":
+ return "org.w3id.cwl.cwl.File"
+ elif field_type == "Directory":
+ return "org.w3id.cwl.cwl.Directory"
+ return field_type
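Because `avroize_type()` now returns the (possibly rewritten) type, bare `File`/`Directory` names come back Avro-qualified. A small sketch of the new behaviour:

    from cwltool.process import avroize_type

    field = {"type": "array", "items": "File"}
    field = avroize_type(field)
    assert field["items"] == "org.w3id.cwl.cwl.File"
    assert avroize_type("Directory") == "org.w3id.cwl.cwl.Directory"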
def get_overrides(
@@ -568,7 +559,7 @@ def __init__(
self, toolpath_object: CommentedMap, loadingContext: LoadingContext
) -> None:
"""Build a Process object from the provided dictionary."""
- super(Process, self).__init__()
+ super().__init__()
self.metadata = getdefault(loadingContext.metadata, {}) # type: CWLObjectType
self.provenance_object = None # type: Optional[ProvenanceProfile]
self.parent_wf = None # type: Optional[ProvenanceProfile]
@@ -590,8 +581,17 @@ def __init__(
self.names = make_avro_schema([SCHEMA_FILE, SCHEMA_DIR, SCHEMA_ANY], Loader({}))
self.tool = toolpath_object
+ debug = loadingContext.debug
self.requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))
- self.requirements.extend(self.tool.get("requirements", []))
+ tool_requirements = self.tool.get("requirements", [])
+ if tool_requirements is None:
+ raise SourceLine(
+ self.tool, "requirements", ValidationException, debug
+ ).makeError(
+ "If 'requirements' is present then it must be a list "
+ "or map/dictionary, not empty."
+ )
+ self.requirements.extend(tool_requirements)
if "id" not in self.tool:
self.tool["id"] = "_:" + str(uuid.uuid4())
self.requirements.extend(
@@ -603,7 +603,13 @@ def __init__(
)
)
self.hints = copy.deepcopy(getdefault(loadingContext.hints, []))
- self.hints.extend(self.tool.get("hints", []))
+ tool_hints = self.tool.get("hints", [])
+ if tool_hints is None:
+ raise SourceLine(self.tool, "hints", ValidationException, debug).makeError(
+ "If 'hints' is present then it must be a list "
+ "or map/dictionary, not empty."
+ )
+ self.hints.extend(tool_hints)
# Versions of requirements and hints which aren't mutated.
self.original_requirements = copy.deepcopy(self.requirements)
self.original_hints = copy.deepcopy(self.hints)
@@ -626,12 +632,13 @@ def __init__(
sd, _ = self.get_requirement("SchemaDefRequirement")
if sd is not None:
- sdtypes = cast(MutableSequence[CWLObjectType], sd["types"])
+ sdtypes = copy.deepcopy(cast(MutableSequence[CWLObjectType], sd["types"]))
avroize_type(cast(MutableSequence[CWLOutputType], sdtypes))
av = make_valid_avro(
sdtypes,
{cast(str, t["name"]): cast(Dict[str, Any], t) for t in sdtypes},
set(),
+ vocab=INPUT_OBJ_VOCAB,
)
for i in av:
self.schemaDefs[i["name"]] = i # type: ignore
@@ -666,7 +673,8 @@ def __init__(
c["type"] = nullable
else:
c["type"] = c["type"]
- avroize_type(c["type"], c["name"])
+
+ c["type"] = avroize_type(c["type"], c["name"])
if key == "inputs":
cast(
List[CWLObjectType], self.inputs_record_schema["fields"]
@@ -676,13 +684,13 @@ def __init__(
List[CWLObjectType], self.outputs_record_schema["fields"]
).append(c)
- with SourceLine(toolpath_object, "inputs", ValidationException):
+ with SourceLine(toolpath_object, "inputs", ValidationException, debug):
self.inputs_record_schema = cast(
CWLObjectType,
make_valid_avro(self.inputs_record_schema, {}, set()),
)
make_avsc_object(convert_to_dict(self.inputs_record_schema), self.names)
- with SourceLine(toolpath_object, "outputs", ValidationException):
+ with SourceLine(toolpath_object, "outputs", ValidationException, debug):
self.outputs_record_schema = cast(
CWLObjectType,
make_valid_avro(self.outputs_record_schema, {}, set()),
@@ -706,9 +714,13 @@ def __init__(
)
raise
if self.doc_schema is not None:
+ classname = toolpath_object["class"]
+ avroname = classname
+ if self.doc_loader and classname in self.doc_loader.vocab:
+ avroname = avro_type_name(self.doc_loader.vocab[classname])
validate_js_expressions(
toolpath_object,
- self.doc_schema.names[toolpath_object["class"]],
+ self.doc_schema.names[avroname],
validate_js_options,
)
@@ -775,7 +787,13 @@ def _init_job(
raise WorkflowException(
"Missing input record schema: " "{}".format(self.names)
)
- validate_ex(schema, job, strict=False, logger=_logger_validation_warnings)
+ validate_ex(
+ schema,
+ job,
+ strict=False,
+ logger=_logger_validation_warnings,
+ vocab=INPUT_OBJ_VOCAB,
+ )
if load_listing and load_listing != "no_listing":
get_listing(fs_access, job, recursive=(load_listing == "deep_listing"))
@@ -860,7 +878,7 @@ def inc(d): # type: (List[int]) -> None
cwl_version = cast(
str,
- self.metadata.get("http://commonwl.org/cwltool#original_cwlVersion", None),
+ self.metadata.get(ORIGINAL_CWLVERSION, None),
)
builder = Builder(
job,
@@ -952,9 +970,7 @@ def evalResources(
resourceReq, _ = self.get_requirement("ResourceRequirement")
if resourceReq is None:
resourceReq = {}
- cwl_version = self.metadata.get(
- "http://commonwl.org/cwltool#original_cwlVersion", None
- )
+ cwl_version = self.metadata.get(ORIGINAL_CWLVERSION, None)
if cwl_version == "v1.0":
ram = 1024
else:
@@ -1012,25 +1028,30 @@ def evalResources(
def validate_hints(
self, avsc_names: Names, hints: List[CWLObjectType], strict: bool
) -> None:
+ if self.doc_loader is None:
+ return
+ debug = _logger.isEnabledFor(logging.DEBUG)
for i, r in enumerate(hints):
- sl = SourceLine(hints, i, ValidationException)
+ sl = SourceLine(hints, i, ValidationException, debug)
with sl:
- if (
- avsc_names.get_name(cast(str, r["class"]), None) is not None
- and self.doc_loader is not None
- ):
- plain_hint = dict(
- (key, r[key])
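+ # Look hints up by their fully-qualified Avro type name, falling back
+ # to the bare class name when it is not in the loader vocabulary.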
+ classname = cast(str, r["class"])
+ avroname = classname
+ if classname in self.doc_loader.vocab:
+ avroname = avro_type_name(self.doc_loader.vocab[classname])
+ if avsc_names.get_name(avroname, None) is not None:
+ plain_hint = {
+ key: r[key]
for key in r
if key not in self.doc_loader.identifiers
- ) # strip identifiers
+ } # strip identifiers
validate_ex(
cast(
Schema,
- avsc_names.get_name(cast(str, plain_hint["class"]), None),
+ avsc_names.get_name(avroname, None),
),
plain_hint,
strict=strict,
+ vocab=self.doc_loader.vocab,
)
elif r["class"] in ("NetworkAccess", "LoadListingRequirement"):
pass
@@ -1061,7 +1082,7 @@ def uniquename(stem: str, names: Optional[Set[str]] = None) -> str:
u = stem
while u in names:
c += 1
- u = "%s_%s" % (stem, c)
+ u = f"{stem}_{c}"
names.add(u)
return u
@@ -1153,7 +1174,21 @@ def scandeps(
if doc["class"] == "Directory" and "listing" in doc:
deps["listing"] = doc["listing"]
if doc["class"] == "File" and "secondaryFiles" in doc:
- deps["secondaryFiles"] = doc["secondaryFiles"]
+ deps["secondaryFiles"] = cast(
+ CWLOutputAtomType,
+ scandeps(
+ base,
+ cast(
+ Union[CWLObjectType, MutableSequence[CWLObjectType]],
+ doc["secondaryFiles"],
+ ),
+ reffields,
+ urlfields,
+ loadref,
+ urljoin=urljoin,
+ nestdirs=nestdirs,
+ ),
+ )
if nestdirs:
deps = nestdir(base, deps)
r.append(deps)
diff --git a/cwltool/procgenerator.py b/cwltool/procgenerator.py
index b09cc3651b..69780e32cc 100644
--- a/cwltool/procgenerator.py
+++ b/cwltool/procgenerator.py
@@ -13,7 +13,9 @@
from .utils import CWLObjectType, JobsGeneratorType, OutputCallbackType
-class ProcessGeneratorJob(object):
+class ProcessGeneratorJob:
+ """Result of ProcessGenerator.job()."""
+
def __init__(self, procgenerator: "ProcessGenerator") -> None:
"""Create a ProccessGenerator Job."""
self.procgenerator = procgenerator
@@ -34,10 +36,9 @@ def job(
) -> JobsGeneratorType:
try:
- for tool in self.procgenerator.embedded_tool.job(
+ yield from self.procgenerator.embedded_tool.job(
job_order, self.receive_output, runtimeContext
- ):
- yield tool
+ )
while self.processStatus is None:
yield None
@@ -53,8 +54,7 @@ def job(
job_order, self.jobout, runtimeContext
)
- for tool in created_tool.job(runinputs, output_callbacks, runtimeContext):
- yield tool
+ yield from created_tool.job(runinputs, output_callbacks, runtimeContext)
except WorkflowException:
raise
@@ -70,7 +70,7 @@ def __init__(
loadingContext: LoadingContext,
) -> None:
"""Create a ProcessGenerator from the given dictionary and context."""
- super(ProcessGenerator, self).__init__(toolpath_object, loadingContext)
+ super().__init__(toolpath_object, loadingContext)
self.loadingContext = loadingContext # type: LoadingContext
try:
if isinstance(toolpath_object["run"], CommentedMap):
diff --git a/cwltool/provenance.py b/cwltool/provenance.py
index 318ff474eb..3dc8e0395d 100644
--- a/cwltool/provenance.py
+++ b/cwltool/provenance.py
@@ -4,6 +4,7 @@
import datetime
import hashlib
import os
+import pwd
import re
import shutil
import tempfile
@@ -11,7 +12,7 @@
from array import array
from collections import OrderedDict
from getpass import getuser
-from io import FileIO, TextIOWrapper, open
+from io import FileIO, TextIOWrapper
from mmap import mmap
from pathlib import Path, PurePosixPath
from typing import (
@@ -63,20 +64,10 @@
CWLOutputType,
create_tmp_dir,
local_path,
- onWindows,
posix_path,
versionstring,
)
-# imports needed for retrieving user data
-if onWindows():
- import ctypes # pylint: disable=unused-import
-else:
- try:
- import pwd # pylint: disable=unused-import
- except ImportError:
- pass
-
if TYPE_CHECKING:
from .command_line_tool import ( # pylint: disable=unused-import
CommandLineTool,
@@ -89,16 +80,7 @@ def _whoami() -> Tuple[str, str]:
"""Return the current operating system account as (username, fullname)."""
username = getuser()
try:
- if onWindows():
- get_user_name = ctypes.windll.secur32.GetUserNameExW # type: ignore
- size = ctypes.pointer(ctypes.c_ulong(0))
- get_user_name(3, None, size)
-
- name_buffer = ctypes.create_unicode_buffer(size.contents.value)
- get_user_name(3, name_buffer, size)
- fullname = str(name_buffer.value)
- else:
- fullname = pwd.getpwuid(os.getuid())[4].split(",")[0]
+ fullname = pwd.getpwuid(os.getuid())[4].split(",")[0]
except (KeyError, IndexError):
fullname = username
@@ -126,7 +108,7 @@ def __init__(self, research_object: "ResearchObject", rel_path: str) -> None:
if not path.startswith(os.path.abspath(research_object.folder)):
raise ValueError("Path is outside Research Object: %s" % path)
_logger.debug("[provenance] Creating WritableBagFile at %s.", path)
- super(WritableBagFile, self).__init__(path, mode="w")
+ super().__init__(path, mode="w")
def write(self, b: Any) -> int:
"""Write some content to the Bag."""
@@ -134,7 +116,7 @@ def write(self, b: Any) -> int:
total = 0
length = len(real_b)
while total < length:
- ret = super(WritableBagFile, self).write(real_b)
+ ret = super().write(real_b)
if ret:
total += ret
for val in self.hashes.values():
@@ -149,7 +131,7 @@ def close(self) -> None:
else:
self.research_object.tagfiles.add(self.rel_path)
- super(WritableBagFile, self).close()
+ super().close()
# { "sha1": "f572d396fae9206628714fb2ce00f72e94f2258f" }
checksums = {}
for name in self.hashes:
@@ -171,7 +153,7 @@ def truncate(self, size: Optional[int] = None) -> int:
# FIXME: This breaks contract IOBase,
# as it means we would have to recalculate the hash
if size is not None:
- raise IOError("WritableBagFile can't truncate")
+ raise OSError("WritableBagFile can't truncate")
return self.tell()
@@ -243,14 +225,14 @@ def _valid_orcid(orcid: Optional[str]) -> str:
"116780-structure-of-the-orcid-identifier"
)
if not match:
- raise ValueError("Invalid ORCID: %s\n%s" % (orcid, help_url))
+ raise ValueError(f"Invalid ORCID: {orcid}\n{help_url}")
# Conservative in what we produce:
# a) Ensure any checksum digit is uppercase
orcid_num = match.group("orcid").upper()
# b) ..and correct
if not _check_mod_11_2(orcid_num):
- raise ValueError("Invalid ORCID checksum: %s\n%s" % (orcid_num, help_url))
+ raise ValueError(f"Invalid ORCID checksum: {orcid_num}\n{help_url}")
# c) Re-add the official prefix https://orcid.org/
return "https://orcid.org/%s" % orcid_num
@@ -330,7 +312,7 @@ def self_check(self) -> None:
def __str__(self) -> str:
"""Represent this RO as a string."""
- return "ResearchObject <{}> in <{}>".format(self.ro_uuid, self.folder)
+ return f"ResearchObject <{self.ro_uuid}> in <{self.folder}>"
def _initialize(self) -> None:
for research_obj_folder in (
@@ -349,7 +331,6 @@ def _initialize_bagit(self) -> None:
self.self_check()
bagit = os.path.join(self.folder, "bagit.txt")
# encoding: always UTF-8 (although ASCII would suffice here)
- # newline: ensure LF also on Windows
with open(bagit, "w", encoding=ENCODING, newline="\n") as bag_it_file:
# TODO: \n or \r\n ?
bag_it_file.write("BagIt-Version: 0.97\n")
@@ -366,8 +347,8 @@ def open_log_file_for_activity(
name = "engine"
else:
name = "activity"
- p = os.path.join(LOGS, "{}.{}.txt".format(name, activity_uuid))
- _logger.debug("[provenance] Opening log file for %s: %s" % (name, p))
+ p = os.path.join(LOGS, f"{name}.{activity_uuid}.txt")
+ _logger.debug(f"[provenance] Opening log file for {name}: {p}")
self.add_annotation(activity_uuid.urn, [p], CWLPROV["log"].uri)
return self.write_bag_file(p)
@@ -424,7 +405,6 @@ def write_bag_file(
bag_file = WritableBagFile(self, path)
if encoding is not None:
# encoding: match Tag-File-Character-Encoding: UTF-8
- # newline: ensure LF also on Windows
return TextIOWrapper(
cast(BinaryIO, bag_file), encoding=encoding, newline="\n"
)
@@ -712,7 +692,7 @@ def _write_ro_manifest(self) -> None:
manifest = OrderedDict(
{
"@context": [
- {"@base": "%s%s/" % (self.base_uri, posix_path(METADATA))},
+ {"@base": f"{self.base_uri}{posix_path(METADATA)}/"},
"https://w3id.org/bundle/context",
],
"id": "/",
@@ -831,7 +811,6 @@ def add_data_file(
os.rename(tmp.name, path)
# Relative posix path
- # (to avoid \ on Windows)
rel_path = posix_path(os.path.relpath(path, self.folder))
# Register in bagit checksum
@@ -882,15 +861,12 @@ def add_to_manifest(self, rel_path: str, checksums: Dict[str, str]) -> None:
for (method, hash_value) in checksums.items():
# File not in manifest because we bailed out on
# existence in bagged_size above
- manifestpath = os.path.join(
- self.folder, "%s-%s.txt" % (manifest, method.lower())
- )
+ manifestpath = os.path.join(self.folder, f"{manifest}-{method.lower()}.txt")
# encoding: match Tag-File-Character-Encoding: UTF-8
- # newline: ensure LF also on Windows
with open(
manifestpath, "a", encoding=ENCODING, newline="\n"
) as checksum_file:
- line = "%s %s\n" % (hash_value, rel_path)
+ line = f"{hash_value} {rel_path}\n"
_logger.debug("[provenance] Added to %s: %s", manifestpath, line)
checksum_file.write(line)
@@ -899,7 +875,7 @@ def _add_to_bagit(self, rel_path: str, **checksums: str) -> None:
raise ValueError("rel_path must be relative: %s" % rel_path)
lpath = os.path.join(self.folder, local_path(rel_path))
if not os.path.exists(lpath):
- raise IOError("File %s does not exist within RO: %s" % (rel_path, lpath))
+ raise OSError(f"File {rel_path} does not exist within RO: {lpath}")
if rel_path in self.bagged_size:
# Already added, assume checksum OK
@@ -982,7 +958,7 @@ def _relativise_files(
) as fp:
relative_path = self.add_data_file(fp)
checksum = PurePosixPath(relative_path).name
- structure["checksum"] = "%s$%s" % (SHA1, checksum)
+ structure["checksum"] = f"{SHA1}${checksum}"
if relative_path is not None:
# RO-relative path as new location
structure["location"] = str(PurePosixPath("..") / relative_path)
diff --git a/cwltool/provenance_profile.py b/cwltool/provenance_profile.py
index c5ecbd77ba..4475e49be1 100644
--- a/cwltool/provenance_profile.py
+++ b/cwltool/provenance_profile.py
@@ -59,13 +59,9 @@ def copy_job_order(
return job_order_object
customised_job = {} # type: CWLObjectType
# new job object for RO
+ debug = _logger.isEnabledFor(logging.DEBUG)
for each, i in enumerate(job.tool["inputs"]):
- with SourceLine(
- job.tool["inputs"],
- each,
- WorkflowException,
- _logger.isEnabledFor(logging.DEBUG),
- ):
+ with SourceLine(job.tool["inputs"], each, WorkflowException, debug):
iid = shortname(i["id"])
if iid in job_order_object:
customised_job[iid] = copy.deepcopy(job_order_object[iid])
@@ -116,7 +112,7 @@ def __init__(
def __str__(self) -> str:
"""Represent this Provenvance profile as a string."""
- return "ProvenanceProfile <%s> in <%s>" % (
+ return "ProvenanceProfile <{}> in <{}>".format(
self.workflow_run_uri,
self.research_object,
)
@@ -321,7 +317,7 @@ def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, st
"data:" + checksum, {PROV_TYPE: WFPROV["Artifact"]}
)
if "checksum" not in value:
- value["checksum"] = "%s$%s" % (SHA1, checksum)
+ value["checksum"] = f"{SHA1}${checksum}"
if not entity and "contents" in value:
# Anonymous file, add content as string
@@ -362,7 +358,7 @@ def declare_file(self, value: CWLObjectType) -> Tuple[ProvEntity, ProvEntity, st
elif sec["class"] == "Directory":
sec_entity = self.declare_directory(sec)
else:
- raise ValueError("Got unexpected secondaryFiles value: {}".format(sec))
+ raise ValueError(f"Got unexpected secondaryFiles value: {sec}")
# We don't know how/when/where the secondary file was generated,
# but CWL convention is a kind of summary/index derived
# from the original file. As it's generally in a different format
@@ -626,7 +622,7 @@ def used_artefacts(
if name is not None:
base += "/" + name
for key, value in job_order.items():
- prov_role = self.wf_ns["%s/%s" % (base, key)]
+ prov_role = self.wf_ns[f"{base}/{key}"]
try:
entity = self.declare_artefact(value)
self.document.used(
@@ -661,7 +657,7 @@ def generate_output_prov(
if name is not None:
name = urllib.parse.quote(str(name), safe=":/,#")
# FIXME: Probably not "main" in nested workflows
- role = self.wf_ns["main/%s/%s" % (name, output)]
+ role = self.wf_ns[f"main/{name}/{output}"]
else:
role = self.wf_ns["main/%s" % output]
@@ -737,7 +733,7 @@ def finalize_prov_profile(self, name):
# workflows, but that's OK as we'll also include run uuid
# which also covers the case of this step being run in
# multiple places or iterations
- filename = "%s.%s.cwlprov" % (wf_name, self.workflow_run_uuid)
+ filename = f"{wf_name}.{self.workflow_run_uuid}.cwlprov"
basename = str(PurePosixPath(PROVENANCE) / filename)
diff --git a/cwltool/resolver.py b/cwltool/resolver.py
index f164bd8ffc..cb52281000 100644
--- a/cwltool/resolver.py
+++ b/cwltool/resolver.py
@@ -21,7 +21,7 @@ def resolve_local(document_loader: Optional[Loader], uri: str) -> Optional[str]:
if pathobj.is_file():
if frag:
- return "{}#{}".format(pathobj.as_uri(), frag)
+ return f"{pathobj.as_uri()}#{frag}"
return pathobj.as_uri()
sharepaths = [
@@ -39,8 +39,8 @@ def resolve_local(document_loader: Optional[Loader], uri: str) -> Optional[str]:
for path in shares:
if os.path.exists(path):
return Path(uri).as_uri()
- if os.path.exists("{}.cwl".format(path)):
- return Path("{}.cwl".format(path)).as_uri()
+ if os.path.exists(f"{path}.cwl"):
+ return Path(f"{path}.cwl").as_uri()
return None
diff --git a/cwltool/run_job.py b/cwltool/run_job.py
new file mode 100644
index 0000000000..93b4e8a94b
--- /dev/null
+++ b/cwltool/run_job.py
@@ -0,0 +1,101 @@
+"""Only used when there is a job script or CWLTOOL_FORCE_SHELL_POPEN=1."""
+import json
+import os
+import subprocess # nosec
+import sys
+from typing import BinaryIO, Dict, List, Optional, TextIO, Union
+
+
+def handle_software_environment(cwl_env: Dict[str, str], script: str) -> Dict[str, str]:
+ """Update the provided environment dict by running the script."""
+ exec_env = cwl_env.copy()
+ exec_env["_CWLTOOL"] = "1"
+ res = subprocess.run(["bash", script], shell=False, env=exec_env) # nosec
+ if res.returncode != 0:
+ sys.stderr.write(
+ "Error while using SoftwareRequirements to modify environment\n"
+ )
+ return cwl_env
+
+ env = cwl_env.copy()
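+ # "output_environment.dat" is the NUL-delimited environment dump written
+ # by the job script (via `env -0`, or env_to_stdout.py as a fallback; see
+ # software_requirements.py); strip the trailing NUL so the final split
+ # does not yield an empty entry.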
+ with open("output_environment.dat") as _:
+ data = _.read().strip("\0")
+ for line in data.split("\0"):
+ key, val = line.split("=", 1)
+ if key in ("_", "PWD", "SHLVL", "TMPDIR", "HOME", "_CWLTOOL"):
+ # Skip some variables that are meaningful to the shell or set
+ # specifically by the CWL runtime environment.
+ continue
+ env[key] = val
+ return env
+
+
+def main(argv: List[str]) -> int:
+ """
+ Read in the configuration JSON and execute the commands.
+
+ The first argument is the path to the JSON dictionary file containing keys:
+ "commands": an array of strings that represents the command line to run
+ "cwd": A string specifying which directory to run in
+ "env": a dictionary of strings containing the environment variables to set
+ "stdin_path": a string (or a null) giving the path that should be piped to STDIN
+ "stdout_path": a string (or a null) giving the path that should receive the STDOUT
+ "stderr_path": a string (or a null) giving the path that should receive the STDERR
+
+ The second argument is optional, it specifes a shell script to execute prior,
+ and the environment variables it sets will be combined with the environment
+ variables from the "env" key in the JSON dictionary from the first argument.
+ """
+ with open(argv[1]) as f:
+ popen_description = json.load(f)
+ commands = popen_description["commands"]
+ cwd = popen_description["cwd"]
+ env = popen_description["env"]
+ env["PATH"] = os.environ.get("PATH")
+ stdin_path = popen_description["stdin_path"]
+ stdout_path = popen_description["stdout_path"]
+ stderr_path = popen_description["stderr_path"]
+ if stdin_path is not None:
+ stdin: Union[BinaryIO, int] = open(stdin_path, "rb")
+ else:
+ stdin = subprocess.PIPE
+ if stdout_path is not None:
+ stdout: Union[BinaryIO, TextIO] = open(stdout_path, "wb")
+ else:
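+ # no stdout_path was given; send the child's output to our stderr
+ # instead of stdout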
+ stdout = sys.stderr
+ if stderr_path is not None:
+ stderr: Union[BinaryIO, TextIO] = open(stderr_path, "wb")
+ else:
+ stderr = sys.stderr
+
+ try:
+ env_script: Optional[str] = argv[2]
+ except IndexError:
+ env_script = None
+ if env_script is not None:
+ env = handle_software_environment(env, env_script)
+
+ sp = subprocess.Popen( # nosec
+ commands,
+ shell=False,
+ close_fds=True,
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ env=env,
+ cwd=cwd,
+ )
+ if sp.stdin:
+ sp.stdin.close()
+ rcode = sp.wait()
+ if not isinstance(stdin, int):
+ stdin.close()
+ if stdout is not sys.stderr:
+ stdout.close()
+ if stderr is not sys.stderr:
+ stderr.close()
+ return rcode
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv))
diff --git a/cwltool/sandboxjs.py b/cwltool/sandboxjs.py
index f23086c1d6..a677ba0dd9 100644
--- a/cwltool/sandboxjs.py
+++ b/cwltool/sandboxjs.py
@@ -3,11 +3,9 @@
import errno
import json
import os
-import queue
import re
import select
import subprocess # nosec
-import sys
import threading
from io import BytesIO
from typing import List, Optional, Tuple, cast
@@ -16,7 +14,7 @@
from schema_salad.utils import json_dumps
from .loghandler import _logger
-from .utils import CWLOutputType, onWindows, processes_to_kill
+from .utils import CWLOutputType, processes_to_kill
class JavascriptException(Exception):
@@ -180,7 +178,7 @@ def exec_js_process(
else:
nodejs = localdata.procs.get(js_engine)
- if nodejs is None or nodejs.poll() is not None or onWindows():
+ if nodejs is None or nodejs.poll() is not None:
res = resource_stream(__name__, js_engine)
js_engine_code = res.read().decode("utf-8")
@@ -226,95 +224,20 @@ def process_finished() -> bool:
PROCESS_FINISHED_STR
) and stderr_buf.getvalue().decode("utf-8").endswith(PROCESS_FINISHED_STR)
- # On windows system standard input/output are not handled properly by select module
- # (modules like pywin32, msvcrt, gevent don't work either)
- if sys.platform == "win32":
- READ_BYTES_SIZE = 512
-
- # creating queue for reading from a thread to queue
- input_queue = queue.Queue()
- output_queue = queue.Queue()
- error_queue = queue.Queue()
-
- # To tell threads that output has ended and threads can safely exit
- no_more_output = threading.Lock()
- no_more_output.acquire()
- no_more_error = threading.Lock()
- no_more_error.acquire()
-
- # put constructed command to input queue which then will be passed to nodejs's stdin
- def put_input(input_queue):
- while True:
- buf = stdin_buf.read(READ_BYTES_SIZE)
- if buf:
- input_queue.put(buf)
- else:
- break
-
- # get the output from nodejs's stdout and continue till output ends
- def get_output(output_queue):
- while not no_more_output.acquire(False):
- buf = os.read(nodejs.stdout.fileno(), READ_BYTES_SIZE)
- if buf:
- output_queue.put(buf)
-
- # get the output from nodejs's stderr and continue till error output ends
- def get_error(error_queue):
- while not no_more_error.acquire(False):
- buf = os.read(nodejs.stderr.fileno(), READ_BYTES_SIZE)
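+ # A single select()-based pump: feed the expression to node's stdin and
+ # drain stdout/stderr in PIPE_BUF-sized chunks until the sentinel string
+ # signals that the process has finished.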
+ while not process_finished() and timer.is_alive():
+ rready, wready, _ = select.select(rselect, wselect, [])
+ try:
+ if nodejs.stdin in wready:
+ buf = stdin_buf.read(select.PIPE_BUF)
if buf:
- error_queue.put(buf)
-
- # Threads managing nodejs.stdin, nodejs.stdout and nodejs.stderr respectively
- input_thread = threading.Thread(target=put_input, args=(input_queue,))
- input_thread.daemon = True
- input_thread.start()
- output_thread = threading.Thread(target=get_output, args=(output_queue,))
- output_thread.daemon = True
- output_thread.start()
- error_thread = threading.Thread(target=get_error, args=(error_queue,))
- error_thread.daemon = True
- error_thread.start()
-
- finished = False
-
- while not finished and timer.is_alive():
- try:
- if nodejs.stdin in wselect:
- if not input_queue.empty():
- os.write(nodejs.stdin.fileno(), input_queue.get())
- elif not input_thread.is_alive():
- wselect = []
- if nodejs.stdout in rselect:
- if not output_queue.empty():
- stdout_buf.write(output_queue.get())
-
- if nodejs.stderr in rselect:
- if not error_queue.empty():
- stderr_buf.write(error_queue.get())
-
- if process_finished() and error_queue.empty() and output_queue.empty():
- finished = True
- no_more_output.release()
- no_more_error.release()
- except OSError:
- break
-
- else:
- while not process_finished() and timer.is_alive():
- rready, wready, _ = select.select(rselect, wselect, [])
- try:
- if nodejs.stdin in wready:
- buf = stdin_buf.read(select.PIPE_BUF)
+ os.write(nodejs.stdin.fileno(), buf)
+ for pipes in ((nodejs.stdout, stdout_buf), (nodejs.stderr, stderr_buf)):
+ if pipes[0] in rready:
+ buf = os.read(pipes[0].fileno(), select.PIPE_BUF)
if buf:
- os.write(nodejs.stdin.fileno(), buf)
- for pipes in ((nodejs.stdout, stdout_buf), (nodejs.stderr, stderr_buf)):
- if pipes[0] in rready:
- buf = os.read(pipes[0].fileno(), select.PIPE_BUF)
- if buf:
- pipes[1].write(buf)
- except OSError:
- break
+ pipes[1].write(buf)
+ except OSError:
+ break
timer.cancel()
stdin_buf.close()
@@ -330,10 +253,6 @@ def get_error(error_queue):
returncode = nodejs.returncode
else:
returncode = 0
- # On windows currently a new instance of nodejs process is used due to
- # problem with blocking on read operation on windows
- if onWindows():
- nodejs.kill()
return returncode, stdoutdata.decode("utf-8"), stderrdata.decode("utf-8")
@@ -344,7 +263,7 @@ def code_fragment_to_js(jscript: str, jslib: str = "") -> str:
else:
inner_js = "{return (%s);}" % jscript
- return u'"use strict";\n{}\n(function(){})()'.format(jslib, inner_js)
+ return f'"use strict";\n{jslib}\n(function(){inner_js})()'
def execjs(
@@ -394,15 +313,17 @@ def fn_linenum() -> str:
% (returncode, fn_linenum(), stdfmt(stdout), stdfmt(stderr))
)
else:
- info = "Javascript expression was: %s\nstdout was: %s\nstderr was: %s" % (
- js,
- stdfmt(stdout),
- stdfmt(stderr),
+ info = (
+ "Javascript expression was: {}\nstdout was: {}\nstderr was: {}".format(
+ js,
+ stdfmt(stdout),
+ stdfmt(stderr),
+ )
)
if returncode == -1:
raise JavascriptException(
- "Long-running script killed after {} seconds: {}".format(timeout, info)
+ f"Long-running script killed after {timeout} seconds: {info}"
)
else:
raise JavascriptException(info)
diff --git a/cwltool/secrets.py b/cwltool/secrets.py
index 0d0f5b23d8..7356c4c91d 100644
--- a/cwltool/secrets.py
+++ b/cwltool/secrets.py
@@ -5,7 +5,7 @@
from .utils import CWLObjectType, CWLOutputType
-class SecretStore(object):
+class SecretStore:
"""Minimal implementation of a secret storage."""
def __init__(self) -> None:
diff --git a/cwltool/singularity.py b/cwltool/singularity.py
index 9361bc0169..ea87acd144 100644
--- a/cwltool/singularity.py
+++ b/cwltool/singularity.py
@@ -1,11 +1,11 @@
"""Support for executing Docker containers using the Singularity 2.x engine."""
+import logging
import os
import os.path
import re
import shutil
import sys
-from distutils import spawn
from subprocess import ( # nosec
DEVNULL,
PIPE,
@@ -20,17 +20,11 @@
from .builder import Builder
from .context import RuntimeContext
-from .errors import UnsupportedRequirement, WorkflowException
+from .errors import WorkflowException
from .job import ContainerCommandLineJob
from .loghandler import _logger
from .pathmapper import MapperEnt, PathMapper
-from .utils import (
- CWLObjectType,
- create_tmp_dir,
- docker_windows_path_adjust,
- ensure_non_writable,
- ensure_writable,
-)
+from .utils import CWLObjectType, create_tmp_dir, ensure_non_writable, ensure_writable
_USERNS = None # type: Optional[bool]
_SINGULARITY_VERSION = ""
@@ -50,6 +44,7 @@ def _singularity_supports_userns() -> bool:
_USERNS = (
"No valid /bin/sh" in result
or "/bin/sh doesn't exist in container" in result
+ or "executable file not found in" in result
)
except TimeoutExpired:
_USERNS = False
@@ -58,12 +53,13 @@ def _singularity_supports_userns() -> bool:
def get_version() -> str:
global _SINGULARITY_VERSION # pylint: disable=global-statement
- if not _SINGULARITY_VERSION:
+ if _SINGULARITY_VERSION == "":
_SINGULARITY_VERSION = check_output( # nosec
["singularity", "--version"], universal_newlines=True
- )
+ ).strip()
if _SINGULARITY_VERSION.startswith("singularity version "):
_SINGULARITY_VERSION = _SINGULARITY_VERSION[20:]
+ _logger.debug(f"Singularity version: {_SINGULARITY_VERSION}.")
return _SINGULARITY_VERSION
@@ -80,6 +76,12 @@ def is_version_3_1_or_newer() -> bool:
return int(version[0]) >= 4 or (int(version[0]) == 3 and int(version[1]) >= 1)
+def is_version_3_4_or_newer() -> bool:
+ """Detect if Singularity v3.4+ is available."""
+ version = get_version().split(".")
+ return int(version[0]) >= 4 or (int(version[0]) == 3 and int(version[1]) >= 4)
+
+
def _normalize_image_id(string: str) -> str:
return string.replace("/", "_") + ".img"
@@ -99,9 +101,7 @@ def __init__(
name: str,
) -> None:
"""Builder for invoking the Singularty software container engine."""
- super(SingularityCommandLineJob, self).__init__(
- builder, joborder, make_path_mapper, requirements, hints, name
- )
+ super().__init__(builder, joborder, make_path_mapper, requirements, hints, name)
@staticmethod
def get_image(
@@ -122,6 +122,8 @@ def get_image(
candidates = []
cache_folder = None
+ debug = _logger.isEnabledFor(logging.DEBUG)
+
if "CWL_SINGULARITY_CACHE" in os.environ:
cache_folder = os.environ["CWL_SINGULARITY_CACHE"]
elif is_version_2_6() and "SINGULARITY_PULLFOLDER" in os.environ:
@@ -218,11 +220,11 @@ def get_image(
found = True
elif "dockerFile" in dockerRequirement:
- raise WorkflowException(
- SourceLine(dockerRequirement, "dockerFile").makeError(
- "dockerFile is not currently supported when using the "
- "Singularity runtime for Docker containers."
- )
+ raise SourceLine(
+ dockerRequirement, "dockerFile", WorkflowException, debug
+ ).makeError(
+ "dockerFile is not currently supported when using the "
+ "Singularity runtime for Docker containers."
)
elif "dockerLoad" in dockerRequirement:
if is_version_3_1_or_newer():
@@ -240,18 +242,18 @@ def get_image(
check_call(cmd, stdout=sys.stderr) # nosec
found = True
dockerRequirement["dockerImageId"] = name
- raise WorkflowException(
- SourceLine(dockerRequirement, "dockerLoad").makeError(
- "dockerLoad is not currently supported when using the "
- "Singularity runtime (version less than 3.1) for Docker containers."
- )
+ raise SourceLine(
+ dockerRequirement, "dockerLoad", WorkflowException, debug
+ ).makeError(
+ "dockerLoad is not currently supported when using the "
+ "Singularity runtime (version less than 3.1) for Docker containers."
)
elif "dockerImport" in dockerRequirement:
- raise WorkflowException(
- SourceLine(dockerRequirement, "dockerImport").makeError(
- "dockerImport is not currently supported when using the "
- "Singularity runtime for Docker containers."
- )
+ raise SourceLine(
+ dockerRequirement, "dockerImport", WorkflowException, debug
+ ).makeError(
+ "dockerImport is not currently supported when using the "
+ "Singularity runtime for Docker containers."
)
return found
@@ -268,7 +270,7 @@ def get_from_requirements(
(e.g. hello-world-latest.{img,sif}).
"""
- if not bool(spawn.find_executable("singularity")):
+ if not bool(shutil.which("singularity")):
raise WorkflowException("singularity executable is not available")
if not self.get_image(cast(Dict[str, str], r), pull_image, force_pull):
@@ -283,29 +285,27 @@ def append_volume(
runtime: List[str], source: str, target: str, writable: bool = False
) -> None:
runtime.append("--bind")
- runtime.append(
- "{}:{}:{}".format(
- docker_windows_path_adjust(source),
- docker_windows_path_adjust(target),
- "rw" if writable else "ro",
- )
- )
+ # Mounts are writable by default, so 'rw' is optional and not
+ # supported (due to a bug) in some 3.6 series releases.
+ vol = f"{source}:{target}"
+ if not writable:
+ vol += ":ro"
+ runtime.append(vol)
def add_file_or_directory_volume(
self, runtime: List[str], volume: MapperEnt, host_outdir_tgt: Optional[str]
) -> None:
- if host_outdir_tgt is not None:
- # workaround for lack of overlapping mounts in Singularity
- # revert to daa923d5b0be3819b6ed0e6440e7193e65141052
- # once https://github.com/sylabs/singularity/issues/1607
- # is fixed
- if volume.type == "File":
- shutil.copy(volume.resolved, host_outdir_tgt)
+ if not volume.resolved.startswith("_:"):
+ if host_outdir_tgt is not None and not is_version_3_4_or_newer():
+ # workaround for lack of overlapping mounts in Singularity <3.4
+ if volume.type == "File":
+ os.makedirs(os.path.dirname(host_outdir_tgt), exist_ok=True)
+ shutil.copy(volume.resolved, host_outdir_tgt)
+ else:
+ shutil.copytree(volume.resolved, host_outdir_tgt)
+ ensure_non_writable(host_outdir_tgt)
else:
- shutil.copytree(volume.resolved, host_outdir_tgt)
- ensure_non_writable(host_outdir_tgt)
- elif not volume.resolved.startswith("_:"):
- self.append_volume(runtime, volume.resolved, volume.target)
+ self.append_volume(runtime, volume.resolved, volume.target)
def add_writable_file_volume(
self,
@@ -314,11 +314,8 @@ def add_writable_file_volume(
host_outdir_tgt: Optional[str],
tmpdir_prefix: str,
) -> None:
- if host_outdir_tgt is not None:
- # workaround for lack of overlapping mounts in Singularity
- # revert to daa923d5b0be3819b6ed0e6440e7193e65141052
- # once https://github.com/sylabs/singularity/issues/1607
- # is fixed
+ if host_outdir_tgt is not None and not is_version_3_4_or_newer():
+ # workaround for lack of overlapping mounts in Singularity <3.4
if self.inplace_update:
try:
os.link(os.path.realpath(volume.resolved), host_outdir_tgt)
@@ -331,14 +328,21 @@ def add_writable_file_volume(
self.append_volume(runtime, volume.resolved, volume.target, writable=True)
ensure_writable(volume.resolved)
else:
- file_copy = os.path.join(
- create_tmp_dir(tmpdir_prefix),
- os.path.basename(volume.resolved),
- )
- shutil.copy(volume.resolved, file_copy)
- # volume.resolved = file_copy
- self.append_volume(runtime, file_copy, volume.target, writable=True)
- ensure_writable(file_copy)
+ if host_outdir_tgt:
+ # shortcut, just copy to the output directory
+ # which is already going to be mounted
+ if not os.path.exists(os.path.dirname(host_outdir_tgt)):
+ os.makedirs(os.path.dirname(host_outdir_tgt))
+ shutil.copy(volume.resolved, host_outdir_tgt)
+ ensure_writable(host_outdir_tgt)
+ else:
+ file_copy = os.path.join(
+ create_tmp_dir(tmpdir_prefix),
+ os.path.basename(volume.resolved),
+ )
+ shutil.copy(volume.resolved, file_copy)
+ self.append_volume(runtime, file_copy, volume.target, writable=True)
+ ensure_writable(file_copy)
def add_writable_directory_volume(
self,
@@ -348,35 +352,46 @@ def add_writable_directory_volume(
tmpdir_prefix: str,
) -> None:
if volume.resolved.startswith("_:"):
- if host_outdir_tgt is not None:
- new_dir = host_outdir_tgt
- else:
+ # Synthetic directory that needs creating first
+ if not host_outdir_tgt:
new_dir = os.path.join(
create_tmp_dir(tmpdir_prefix),
- os.path.basename(volume.resolved),
+ os.path.basename(volume.target),
)
- os.makedirs(new_dir)
+ self.append_volume(runtime, new_dir, volume.target, writable=True)
+ os.makedirs(new_dir)
+ # ^^ Unlike Docker, Singularity won't create directories on demand
+ elif not os.path.exists(host_outdir_tgt):
+ os.makedirs(host_outdir_tgt)
else:
- if host_outdir_tgt is not None:
- # workaround for lack of overlapping mounts in Singularity
- # revert to daa923d5b0be3819b6ed0e6440e7193e65141052
- # once https://github.com/sylabs/singularity/issues/1607
- # is fixed
+ if host_outdir_tgt is not None and not is_version_3_4_or_newer():
+ # workaround for lack of overlapping mounts in Singularity < 3.4
shutil.copytree(volume.resolved, host_outdir_tgt)
ensure_writable(host_outdir_tgt)
else:
- if not self.inplace_update:
- dir_copy = os.path.join(
- create_tmp_dir(tmpdir_prefix),
- os.path.basename(volume.resolved),
+ if self.inplace_update:
+ self.append_volume(
+ runtime, volume.resolved, volume.target, writable=True
)
- shutil.copytree(volume.resolved, dir_copy)
- source = dir_copy
- # volume.resolved = dir_copy
else:
- source = volume.resolved
- self.append_volume(runtime, source, volume.target, writable=True)
- ensure_writable(source)
+ if not host_outdir_tgt:
+ tmpdir = create_tmp_dir(tmpdir_prefix)
+ new_dir = os.path.join(
+ tmpdir, os.path.basename(volume.resolved)
+ )
+ shutil.copytree(volume.resolved, new_dir)
+ self.append_volume(
+ runtime, new_dir, volume.target, writable=True
+ )
+ else:
+ shutil.copytree(volume.resolved, host_outdir_tgt)
+ ensure_writable(host_outdir_tgt or new_dir)
+
+ def _required_env(self) -> Dict[str, str]:
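+ """Return the HOME and TMPDIR defaults that must be set inside the container."""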
+ return {
+ "TMPDIR": self.CONTAINER_TMPDIR,
+ "HOME": self.builder.outdir,
+ }
def create_runtime(
self, env: MutableMapping[str, str], runtime_context: RuntimeContext
@@ -389,33 +404,34 @@ def create_runtime(
"exec",
"--contain",
"--ipc",
+ "--cleanenv",
]
if _singularity_supports_userns():
runtime.append("--userns")
else:
runtime.append("--pid")
+
+ container_HOME: Optional[str] = None
if is_version_3_1_or_newer():
+ # Remove HOME, as it is passed in a special way (restored below)
+ container_HOME = self.environment.pop("HOME")
runtime.append("--home")
runtime.append(
"{}:{}".format(
- docker_windows_path_adjust(os.path.realpath(self.outdir)),
- self.builder.outdir,
+ os.path.realpath(self.outdir),
+ container_HOME,
)
)
else:
- runtime.append("--bind")
- runtime.append(
- "{}:{}:rw".format(
- docker_windows_path_adjust(os.path.realpath(self.outdir)),
- self.builder.outdir,
- )
- )
- runtime.append("--bind")
- tmpdir = "/tmp" # nosec
- runtime.append(
- "{}:{}:rw".format(
- docker_windows_path_adjust(os.path.realpath(self.tmpdir)), tmpdir
+ self.append_volume(
+ runtime,
+ os.path.realpath(self.outdir),
+ self.environment["HOME"],
+ writable=True,
)
+
+ self.append_volume(
+ runtime, os.path.realpath(self.tmpdir), self.CONTAINER_TMPDIR, writable=True
)
self.add_volumes(
@@ -435,18 +451,18 @@ def create_runtime(
)
runtime.append("--pwd")
- runtime.append("%s" % (docker_windows_path_adjust(self.builder.outdir)))
+ runtime.append(self.builder.outdir)
- if runtime_context.custom_net:
- raise UnsupportedRequirement(
- "Singularity implementation does not support custom networking"
- )
- elif runtime_context.disable_net:
- runtime.append("--net")
-
- env["SINGULARITYENV_TMPDIR"] = tmpdir
- env["SINGULARITYENV_HOME"] = self.builder.outdir
+ if self.networkaccess:
+ if runtime_context.custom_net:
+ runtime.extend(["--net", "--network", runtime_context.custom_net])
+ else:
+ runtime.extend(["--net", "--network", "none"])
for name, value in self.environment.items():
- env["SINGULARITYENV_{}".format(name)] = str(value)
+ env[f"SINGULARITYENV_{name}"] = str(value)
+
+ if container_HOME:
+ # Restore HOME if we removed it above.
+ self.environment["HOME"] = container_HOME
return (runtime, None)
diff --git a/cwltool/software_requirements.py b/cwltool/software_requirements.py
index 3ba602c56d..1928a763c7 100644
--- a/cwltool/software_requirements.py
+++ b/cwltool/software_requirements.py
@@ -1,7 +1,7 @@
"""This module handles resolution of SoftwareRequirement hints.
-This is accomplished mainly by adapting cwltool internals to galaxy-lib's
-concept of "dependencies". Despite the name, galaxy-lib is a light weight
+This is accomplished mainly by adapting cwltool internals to galaxy-tool-util's
+concept of "dependencies". Despite the name, galaxy-tool-util is a light weight
library that can be used to map SoftwareRequirements in all sorts of ways -
Homebrew, Conda, custom scripts, environment modules. We'd be happy to find
ways to adapt new package managers and such as well.
@@ -30,13 +30,23 @@
COMMAND_WITH_DEPENDENCIES_TEMPLATE = string.Template(
"""#!/bin/bash
+cat > modify_environment.bash <<'EOF'
$handle_dependencies
-python3 "run_job.py" "job.json"
+# First try env -0
+if ! env -0 > "output_environment.dat" 2> /dev/null; then
+ # If that fails, use the Python script.
+ # In some circumstances (see PEP 538) this will add the LC_CTYPE env var.
+ python3 "env_to_stdout.py" > "output_environment.dat"
+fi
+EOF
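+# run_job.py runs modify_environment.bash first, merges the environment it
+# captured in output_environment.dat, and then executes the command line
+# described by job.json.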
+python3 "run_job.py" "job.json" "modify_environment.bash"
"""
)
-class DependenciesConfiguration(object):
+class DependenciesConfiguration:
+ """Dependency configuration class, for RuntimeContext.job_script_provider."""
+
def __init__(self, args: argparse.Namespace) -> None:
"""Initialize."""
conf_file = getattr(args, "beta_dependency_resolvers_configuration", None)
@@ -149,5 +159,5 @@ def get_container_from_software_requirements(
def ensure_galaxy_lib_available() -> None:
if not SOFTWARE_REQUIREMENTS_ENABLED:
raise Exception(
- "Optional Python library galaxy-lib not available, it is required for this configuration."
+ "Optional Python library galaxy-tool-util not available, it is required for this configuration."
)
diff --git a/cwltool/stdfsaccess.py b/cwltool/stdfsaccess.py
index 338b21df32..0c8eea15d5 100644
--- a/cwltool/stdfsaccess.py
+++ b/cwltool/stdfsaccess.py
@@ -7,8 +7,6 @@
from schema_salad.ref_resolver import file_uri, uri_file_path
-from .utils import onWindows
-
def abspath(src: str, basedir: str) -> str:
if src.startswith("file://"):
@@ -23,7 +21,7 @@ def abspath(src: str, basedir: str) -> str:
return abpath
-class StdFsAccess(object):
+class StdFsAccess:
"""Local filesystem implementation."""
def __init__(self, basedir: str) -> None:
@@ -64,11 +62,3 @@ def join(self, path, *paths): # type: (str, *str) -> str
def realpath(self, path: str) -> str:
return os.path.realpath(path)
-
- # On windows os.path.realpath appends unecessary Drive, here we would avoid that
- def docker_compatible_realpath(self, path: str) -> str:
- if onWindows():
- if path.startswith("/"):
- return path
- return "/" + path
- return self.realpath(path)
diff --git a/cwltool/subgraph.py b/cwltool/subgraph.py
index 2df2f206d9..ec4537fc20 100644
--- a/cwltool/subgraph.py
+++ b/cwltool/subgraph.py
@@ -1,11 +1,22 @@
import urllib
from collections import namedtuple
-from typing import Dict, MutableMapping, MutableSequence, Optional, Set, Tuple, cast
+from typing import (
+ Any,
+ Dict,
+ List,
+ Mapping,
+ MutableMapping,
+ MutableSequence,
+ Optional,
+ Set,
+ Tuple,
+ cast,
+)
from ruamel.yaml.comments import CommentedMap
from .utils import CWLObjectType, aslist
-from .workflow import Workflow
+from .workflow import Workflow, WorkflowStep
Node = namedtuple("Node", ("up", "down", "type"))
UP = "up"
@@ -44,11 +55,18 @@ def declare_node(nodes: Dict[str, Node], nodeid: str, tp: Optional[str]) -> Node
return nodes[nodeid]
+def find_step(steps: List[WorkflowStep], stepid: str) -> Optional[CWLObjectType]:
+ for st in steps:
+ if st.tool["id"] == stepid:
+ return st.tool
+ return None
+
+
def get_subgraph(roots: MutableSequence[str], tool: Workflow) -> CommentedMap:
if tool.tool["class"] != "Workflow":
raise Exception("Can only extract subgraph from workflow")
- nodes = {} # type: Dict[str, Node]
+ nodes: Dict[str, Node] = {}
for inp in tool.tool["inputs"]:
declare_node(nodes, inp["id"], INPUT)
@@ -74,6 +92,8 @@ def get_subgraph(roots: MutableSequence[str], tool: Workflow) -> CommentedMap:
declare_node(nodes, src, None)
nodes[src].down.append(st["id"])
for out in st["out"]:
+ if isinstance(out, Mapping) and "id" in out:
+ out = out["id"]
# output is downstream from step
step.down.append(out)
# step is upstream from output
@@ -81,22 +101,16 @@ def get_subgraph(roots: MutableSequence[str], tool: Workflow) -> CommentedMap:
nodes[out].up.append(st["id"])
# Find all the downstream nodes from the starting points
- visited_down = set() # type: Set[str]
+ visited_down: Set[str] = set()
for r in roots:
if nodes[r].type == OUTPUT:
subgraph_visit(r, nodes, visited_down, UP)
else:
subgraph_visit(r, nodes, visited_down, DOWN)
- def find_step(stepid: str) -> Optional[CWLObjectType]:
- for st in tool.steps:
- if st.tool["id"] == stepid:
- return st.tool
- return None
-
# Now make sure all the nodes are connected to upstream inputs
- visited = set() # type: Set[str]
- rewire = {} # type: Dict[str, Tuple[str, str]]
+ visited: Set[str] = set()
+ rewire: Dict[str, Tuple[str, CWLObjectType]] = {}
for v in visited_down:
visited.add(v)
if nodes[v].type in (STEP, OUTPUT):
@@ -108,15 +122,17 @@ def find_step(stepid: str) -> Optional[CWLObjectType]:
else:
# rewire
df = urllib.parse.urldefrag(u)
- rn = df[0] + "#" + df[1].replace("/", "_")
+ rn = str(df[0] + "#" + df[1].replace("/", "_"))
if nodes[v].type == STEP:
- wfstep = find_step(v)
+ wfstep = find_step(tool.steps, v)
if wfstep is not None:
for inp in cast(
MutableSequence[CWLObjectType], wfstep["inputs"]
):
- if u in inp["source"]:
- rewire[u] = (rn, inp["type"])
+ if "source" in inp and u in cast(
+ CWLObjectType, inp["source"]
+ ):
+ rewire[u] = (rn, cast(CWLObjectType, inp["type"]))
break
else:
raise Exception("Could not find step %s" % v)
@@ -147,3 +163,49 @@ def find_step(stepid: str) -> Optional[CWLObjectType]:
extracted["inputs"].append({"id": rv[0], "type": rv[1]})
return extracted
+
+
+def get_step(tool: Workflow, step_id: str) -> CommentedMap:
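+ """Extract a single WorkflowStep as a standalone Workflow with Any-typed ports."""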
+
+ extracted = CommentedMap()
+
+ step = find_step(tool.steps, step_id)
+ if step is None:
+ raise Exception(f"Step {step_id} was not found")
+
+ extracted["steps"] = [step]
+ extracted["inputs"] = []
+ extracted["outputs"] = []
+
+ for inport in cast(List[CWLObjectType], step["in"]):
+ name = cast(str, inport["id"]).split("#")[-1].split("/")[-1]
+ extracted["inputs"].append({"id": name, "type": "Any"})
+ inport["source"] = name
+ if "linkMerge" in inport:
+ del inport["linkMerge"]
+
+ for outport in cast(List[str], step["out"]):
+ name = outport.split("#")[-1].split("/")[-1]
+ extracted["outputs"].append(
+ {"id": name, "type": "Any", "outputSource": f"{step_id}/{name}"}
+ )
+
+ for f in tool.tool:
+ if f not in ("steps", "inputs", "outputs"):
+ extracted[f] = tool.tool[f]
+
+ return extracted
+
+
+def get_process(tool: Workflow, step_id: str, index: Mapping[str, Any]) -> Any:
+ """Return just a single Process from a Workflow step."""
+ step = find_step(tool.steps, step_id)
+ if step is None:
+ raise Exception(f"Step {step_id} was not found")
+
+ run = step["run"]
+
+ if isinstance(run, str):
+ return index[run]
+ else:
+ return run
diff --git a/cwltool/task_queue.py b/cwltool/task_queue.py
index ba834008b9..76cb760e5e 100644
--- a/cwltool/task_queue.py
+++ b/cwltool/task_queue.py
@@ -5,13 +5,12 @@
import queue
import threading
-
from typing import Callable, Optional
from .loghandler import _logger
-class TaskQueue(object):
+class TaskQueue:
"""A TaskQueue class.
Uses a first-in, first-out queue of tasks executed on a fixed number of
diff --git a/cwltool/update.py b/cwltool/update.py
index 8e261baf0b..e60c696f9d 100644
--- a/cwltool/update.py
+++ b/cwltool/update.py
@@ -26,9 +26,9 @@ def v1_1to1_2(
"""Public updater for v1.1 to v1.2."""
doc = copy.deepcopy(doc)
- upd = doc
+ upd: Union[CommentedSeq, CommentedMap] = doc
if isinstance(upd, MutableMapping) and "$graph" in upd:
- upd = cast(CommentedMap, upd["$graph"])
+ upd = upd["$graph"]
for proc in aslist(upd):
if "cwlVersion" in proc:
del proc["cwlVersion"]
@@ -72,7 +72,7 @@ def rewrite_requirements(t: CWLObjectType) -> None:
r["class"] = rewrite[cls]
else:
raise ValidationException(
- "hints entries must be dictionaries: {} {}.".format(type(r), r)
+ f"hints entries must be dictionaries: {type(r)} {r}."
)
if "steps" in t:
for s in cast(MutableSequence[CWLObjectType], t["steps"]):
@@ -80,7 +80,7 @@ def rewrite_requirements(t: CWLObjectType) -> None:
rewrite_requirements(s)
else:
raise ValidationException(
- "steps entries must be dictionaries: {} {}.".format(type(s), s)
+ f"steps entries must be dictionaries: {type(s)} {s}."
)
def update_secondaryFiles(t, top=False):
@@ -119,9 +119,9 @@ def fix_inputBinding(t: CWLObjectType) -> None:
visit_class(doc, ("ExpressionTool", "Workflow"), fix_inputBinding)
visit_field(doc, "secondaryFiles", partial(update_secondaryFiles, top=True))
- upd = doc
+ upd: Union[CommentedMap, CommentedSeq] = doc
if isinstance(upd, MutableMapping) and "$graph" in upd:
- upd = cast(CommentedMap, upd["$graph"])
+ upd = upd["$graph"]
for proc in aslist(upd):
proc.setdefault("hints", CommentedSeq())
proc["hints"].insert(
@@ -167,9 +167,9 @@ def update_pickvalue(t: CWLObjectType) -> None:
inp["pickValue"] = "the_only_non_null"
visit_class(doc, "Workflow", update_pickvalue)
- upd = doc
+ upd: Union[CommentedSeq, CommentedMap] = doc
if isinstance(upd, MutableMapping) and "$graph" in upd:
- upd = cast(CommentedMap, upd["$graph"])
+ upd = upd["$graph"]
for proc in aslist(upd):
if "cwlVersion" in proc:
del proc["cwlVersion"]
@@ -210,25 +210,25 @@ def v1_2_0dev5to1_2(
]
UPDATES = {
- u"v1.0": v1_0to1_1,
- u"v1.1": v1_1to1_2,
- u"v1.2": None,
+ "v1.0": v1_0to1_1,
+ "v1.1": v1_1to1_2,
+ "v1.2": None,
} # type: Dict[str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]]]
DEVUPDATES = {
- u"v1.1.0-dev1": v1_1_0dev1to1_1,
- u"v1.2.0-dev1": v1_2_0dev1todev2,
- u"v1.2.0-dev2": v1_2_0dev2todev3,
- u"v1.2.0-dev3": v1_2_0dev3todev4,
- u"v1.2.0-dev4": v1_2_0dev4todev5,
- u"v1.2.0-dev5": v1_2_0dev5to1_2,
+ "v1.1.0-dev1": v1_1_0dev1to1_1,
+ "v1.2.0-dev1": v1_2_0dev1todev2,
+ "v1.2.0-dev2": v1_2_0dev2todev3,
+ "v1.2.0-dev3": v1_2_0dev3todev4,
+ "v1.2.0-dev4": v1_2_0dev4todev5,
+ "v1.2.0-dev5": v1_2_0dev5to1_2,
} # type: Dict[str, Optional[Callable[[CommentedMap, Loader, str], Tuple[CommentedMap, str]]]]
ALLUPDATES = UPDATES.copy()
ALLUPDATES.update(DEVUPDATES)
-INTERNAL_VERSION = u"v1.2"
+INTERNAL_VERSION = "v1.2"
ORIGINAL_CWLVERSION = "http://commonwl.org/cwltool#original_cwlVersion"
@@ -283,7 +283,7 @@ def checkversion(
keys = list(UPDATES.keys())
keys.sort()
raise ValidationException(
- u"Version '%s' is a development or deprecated version.\n "
+ "Version '%s' is a development or deprecated version.\n "
"Update your document to a stable version (%s) or use "
"--enable-dev to enable support for development and "
"deprecated versions." % (version, ", ".join(keys))
@@ -319,7 +319,7 @@ def update(
cdoc["cwlVersion"] = version
metadata["cwlVersion"] = version
- metadata["http://commonwl.org/cwltool#original_cwlVersion"] = originalversion
- cdoc["http://commonwl.org/cwltool#original_cwlVersion"] = originalversion
+ metadata[ORIGINAL_CWLVERSION] = originalversion
+ cdoc[ORIGINAL_CWLVERSION] = originalversion
return cdoc
diff --git a/cwltool/utils.py b/cwltool/utils.py
index 45d30c87ce..aca19536e6 100644
--- a/cwltool/utils.py
+++ b/cwltool/utils.py
@@ -2,7 +2,6 @@
import collections
import os
-import platform
import random
import shutil
import stat
@@ -53,8 +52,6 @@
CONTENT_LIMIT = 64 * 1024
-windows_default_container_id = "frolvlad/alpine-bash"
-
DEFAULT_TMP_PREFIX = tempfile.gettempdir() + os.path.sep
processes_to_kill = collections.deque() # type: Deque[subprocess.Popen[str]]
@@ -120,8 +117,8 @@ def versionstring() -> str:
"""Version of CWLtool used to execute the workflow."""
pkg = pkg_resources.require("cwltool")
if pkg:
- return "%s %s" % (sys.argv[0], pkg[0].version)
- return "%s %s" % (sys.argv[0], "unknown version")
+ return f"{sys.argv[0]} {pkg[0].version}"
+ return "{} {}".format(sys.argv[0], "unknown version")
def aslist(thing: Any) -> MutableSequence[Any]:
@@ -145,83 +142,6 @@ def copytree_with_merge(src: str, dst: str) -> None:
shutil.copy2(spath, dpath)
-def docker_windows_path_adjust(path: str) -> str:
- r"""
- Adjust only windows paths for Docker.
-
- The docker run command treats them as unix paths.
-
- Example: 'C:\Users\foo to /C/Users/foo (Docker for Windows) or /c/Users/foo
- (Docker toolbox).
- """
- if onWindows():
- split = path.split(":")
- if len(split) == 2:
- if platform.win32_ver()[0] in ("7", "8"):
- # Docker toolbox uses lowecase windows Drive letters
- split[0] = split[0].lower()
- else:
- split[0] = split[0].capitalize()
- # Docker for Windows uses uppercase windows Drive letters
- path = ":".join(split)
- path = path.replace(":", "").replace("\\", "/")
- return path if path[0] == "/" else "/" + path
- return path
-
-
-def docker_windows_reverse_path_adjust(path: str) -> str:
- r"""
- Change docker path (only on windows os) appropriately back to Windows path.
-
- Example: /C/Users/foo to C:\Users\foo
- """
- if path is not None and onWindows():
- if path[0] == "/":
- path = path[1:]
- else:
- raise ValueError("not a docker path")
- splitpath = path.split("/")
- splitpath[0] = splitpath[0] + ":"
- return "\\".join(splitpath)
- return path
-
-
-def docker_windows_reverse_fileuri_adjust(fileuri: str) -> str:
- r"""
- Convert fileuri to be MS Windows comptabile, if needed.
-
- On docker in windows fileuri do not contain : in path
- To convert this file uri to windows compatible add : after drive letter,
- so file:///E/var becomes file:///E:/var
- """
- if fileuri is not None and onWindows():
- if urllib.parse.urlsplit(fileuri).scheme == "file":
- filesplit = fileuri.split("/")
- if filesplit[3][-1] != ":":
- filesplit[3] = filesplit[3] + ":"
- return "/".join(filesplit)
- return fileuri
- raise ValueError("not a file URI")
- return fileuri
-
-
-def onWindows() -> bool:
- """Check if we are on Windows OS."""
- return os.name == "nt"
-
-
-def convert_pathsep_to_unix(path: str) -> str:
- """
- Convert path seperators to unix style.
-
- On windows os.path.join would use backslash to join path, since we would
- use these paths in Docker we would convert it to use forward slashes: /
- """
- if path is not None and onWindows():
- return path.replace("\\", "/")
- return path
-
-
def cmp_like_py2(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> int:
"""
Compare in the same manner as Python2.
@@ -447,23 +367,30 @@ def downloadHttpFile(httpurl):
return str(f.name)
-def ensure_writable(path): # type: (str) -> None
+def ensure_writable(path: str, include_root: bool = False) -> None:
+ """
+ Ensure that 'path' is writable.
+
+ If 'path' is a directory, then all files and directories under 'path' are
+ made writable, recursively. If 'path' is a file or if 'include_root' is
+ `True`, then 'path' itself is made writable.
+ """
+
+ def add_writable_flag(p: str) -> None:
+ st = os.stat(p)
+ mode = stat.S_IMODE(st.st_mode)
+ os.chmod(p, mode | stat.S_IWUSR)
+
if os.path.isdir(path):
+ if include_root:
+ add_writable_flag(path)
for root, dirs, files in os.walk(path):
for name in files:
- j = os.path.join(root, name)
- st = os.stat(j)
- mode = stat.S_IMODE(st.st_mode)
- os.chmod(j, mode | stat.S_IWUSR)
+ add_writable_flag(os.path.join(root, name))
for name in dirs:
- j = os.path.join(root, name)
- st = os.stat(j)
- mode = stat.S_IMODE(st.st_mode)
- os.chmod(j, mode | stat.S_IWUSR)
+ add_writable_flag(os.path.join(root, name))
else:
- st = os.stat(path)
- mode = stat.S_IMODE(st.st_mode)
- os.chmod(path, mode | stat.S_IWUSR)
+ add_writable_flag(path)
def ensure_non_writable(path): # type: (str) -> None
diff --git a/cwltool/validate_js.py b/cwltool/validate_js.py
index 3a2d757202..64e0df8972 100644
--- a/cwltool/validate_js.py
+++ b/cwltool/validate_js.py
@@ -16,7 +16,7 @@
)
from pkg_resources import resource_stream
-from ruamel.yaml.comments import CommentedMap
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.avro.schema import (
ArraySchema,
EnumSchema,
@@ -39,7 +39,7 @@ def is_expression(tool, schema):
# type: (Any, Optional[Schema]) -> bool
return (
isinstance(schema, EnumSchema)
- and schema.name == "Expression"
+ and schema.name == "org.w3id.cwl.cwl.Expression"
and isinstance(tool, str)
)
@@ -48,7 +48,7 @@ class SuppressLog(logging.Filter):
def __init__(self, name): # type: (str) -> None
"""Initialize this log suppressor."""
name = str(name)
- super(SuppressLog, self).__init__(name)
+ super().__init__(name)
def filter(self, record): # type: (logging.LogRecord) -> bool
return False
@@ -59,10 +59,11 @@ def filter(self, record): # type: (logging.LogRecord) -> bool
def get_expressions(
- tool: Union[CommentedMap, str],
+ tool: Union[CommentedMap, str, CommentedSeq],
schema: Optional[Union[Schema, ArraySchema]],
source_line: Optional[SourceLine] = None,
) -> List[Tuple[str, Optional[SourceLine]]]:
+ debug = _logger.isEnabledFor(logging.DEBUG)
if is_expression(tool, schema):
return [(cast(str, tool), source_line)]
elif isinstance(schema, UnionSchema):
@@ -76,6 +77,7 @@ def get_expressions(
tool,
raise_ex=False,
logger=_logger_validation_warnings,
+ vocab={},
):
valid_schema = possible_schema
@@ -84,12 +86,20 @@ def get_expressions(
if not isinstance(tool, MutableSequence):
return []
+ def tmp_expr(
+ x: Tuple[int, Union[CommentedMap, str, CommentedSeq]]
+ ) -> List[Tuple[str, Optional[SourceLine]]]:
+ # using a lambda for this broke mypyc v0.910 and before
+ return get_expressions(
+ x[1],
+ cast(ArraySchema, schema).items,
+ SourceLine(tool, x[0], include_traceback=debug),
+ )
+
return list(
itertools.chain(
*map(
- lambda x: get_expressions(
- x[1], schema.items, SourceLine(tool, x[0]) # type: ignore
- ),
+ tmp_expr,
enumerate(tool),
)
)
@@ -107,7 +117,7 @@ def get_expressions(
get_expressions(
tool[schema_field.name],
schema_field.type,
- SourceLine(tool, schema_field.name),
+ SourceLine(tool, schema_field.name, include_traceback=debug),
)
)
@@ -116,7 +126,7 @@ def get_expressions(
return []
-JSHintJSReturn = namedtuple("jshint_return", ["errors", "globals"])
+JSHintJSReturn = namedtuple("JSHintJSReturn", ["errors", "globals"])
def jshint_js(
@@ -182,7 +192,7 @@ def dump_jshint_error():
for jshint_error_obj in jshint_json.get("errors", []):
text = "JSHINT: " + js_text_lines[jshint_error_obj["line"] - 1] + "\n"
text += "JSHINT: " + " " * (jshint_error_obj["character"] - 1) + "^\n"
- text += "JSHINT: %s: %s" % (
+ text += "JSHINT: {}: {}".format(
jshint_error_obj["code"],
jshint_error_obj["reason"],
)
@@ -207,7 +217,7 @@ def validate_js_expressions(
if tool.get("requirements") is None:
return
-
+ debug = _logger.isEnabledFor(logging.DEBUG)
requirements = tool["requirements"]
default_globals = ["self", "inputs", "runtime", "console"]
@@ -227,7 +237,8 @@ def validate_js_expressions(
)
js_globals.extend(expression_lib_line_globals)
print_js_hint_messages(
- expression_lib_line_errors, SourceLine(expression_lib, i)
+ expression_lib_line_errors,
+ SourceLine(expression_lib, i, include_traceback=debug),
)
expressions = get_expressions(tool, schema)
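The tmp_expr helper above exists only to dodge a compiler bug; the same lambda-to-named-function rewrite in isolation (an illustrative sketch, not code from this patch):

from typing import List

def doubled(xs: List[int]) -> List[int]:
    # mypyc (through mypy v0.910) could miscompile `lambda x: x * 2`
    # in contexts like the map() call in get_expressions; hoisting it
    # into a named inner function sidesteps the problem.
    def twice(x: int) -> int:
        return x * 2

    return list(map(twice, xs))

assert doubled([1, 2, 3]) == [2, 4, 6]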
diff --git a/cwltool/workflow.py b/cwltool/workflow.py
index dafc3f8d2e..1ed31ac7dd 100644
--- a/cwltool/workflow.py
+++ b/cwltool/workflow.py
@@ -71,7 +71,7 @@ def __init__(
loadingContext: LoadingContext,
) -> None:
"""Initialize this Workflow."""
- super(Workflow, self).__init__(toolpath_object, loadingContext)
+ super().__init__(toolpath_object, loadingContext)
self.provenance_object = None # type: Optional[ProvenanceProfile]
if loadingContext.research_obj is not None:
run_uuid = None # type: Optional[UUID]
@@ -170,8 +170,7 @@ def job(
runtimeContext.part_of = "workflow %s" % job.name
runtimeContext.toplevel = False
- for wjob in job.job(builder.job, output_callbacks, runtimeContext):
- yield wjob
+ yield from job.job(builder.job, output_callbacks, runtimeContext)
def visit(self, op: Callable[[CommentedMap], None]) -> None:
op(self.tool)
@@ -199,6 +198,7 @@ def __init__(
parentworkflowProv: Optional[ProvenanceProfile] = None,
) -> None:
"""Initialize this WorkflowStep."""
+ debug = loadingContext.debug
if "id" in toolpath_object:
self.id = toolpath_object["id"]
else:
@@ -281,12 +281,18 @@ def __init__(
else:
step_entry_name = step_entry
validation_errors.append(
- SourceLine(self.tool["out"], index).makeError(
+ SourceLine(
+ self.tool["out"], index, include_traceback=debug
+ ).makeError(
"Workflow step output '%s' does not correspond to"
% shortname(step_entry_name)
)
+ "\n"
- + SourceLine(self.embedded_tool.tool, "outputs").makeError(
+ + SourceLine(
+ self.embedded_tool.tool,
+ "outputs",
+ include_traceback=debug,
+ ).makeError(
" tool output (expected '%s')"
% (
"', '".join(
@@ -314,7 +320,7 @@ def __init__(
if missing_values:
validation_errors.append(
- SourceLine(self.tool, "in").makeError(
+ SourceLine(self.tool, "in", include_traceback=debug).makeError(
"Step is missing required parameter%s '%s'"
% (
"s" if len(missing_values) > 1 else "",
@@ -326,7 +332,7 @@ def __init__(
if validation_errors:
raise ValidationException("\n".join(validation_errors))
- super(WorkflowStep, self).__init__(toolpath_object, loadingContext)
+ super().__init__(toolpath_object, loadingContext)
if self.embedded_tool.tool["class"] == "Workflow":
(feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
@@ -357,14 +363,14 @@ def __init__(
inp_map = {i["id"]: i for i in inputparms}
for inp in scatter:
if inp not in inp_map:
- raise ValidationException(
- SourceLine(self.tool, "scatter").makeError(
- "Scatter parameter '%s' does not correspond to "
- "an input parameter of this step, expecting '%s'"
- % (
- shortname(inp),
- "', '".join(shortname(k) for k in inp_map.keys()),
- )
+ SourceLine(
+ self.tool, "scatter", ValidationException, debug
+ ).makeError(
+ "Scatter parameter '%s' does not correspond to "
+ "an input parameter of this step, expecting '%s'"
+ % (
+ shortname(inp),
+ "', '".join(shortname(k) for k in inp_map.keys()),
)
)
@@ -431,12 +437,11 @@ def job(
step_input[field] = job_order[inp["id"]]
try:
- for tool in self.embedded_tool.job(
+ yield from self.embedded_tool.job(
step_input,
functools.partial(self.receive_output, output_callbacks),
runtimeContext,
- ):
- yield tool
+ )
except WorkflowException:
_logger.error("Exception on step '%s'", runtimeContext.name)
raise
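Several hunks in this file replace explicit re-yield loops with `yield from`; for plain iteration the two forms are equivalent, e.g.:

from typing import Iterator

def inner() -> Iterator[int]:
    yield 1
    yield 2

def outer_loop() -> Iterator[int]:
    for item in inner():  # the style being removed
        yield item

def outer_delegate() -> Iterator[int]:
    yield from inner()  # the replacement; also forwards send()/throw()

assert list(outer_loop()) == list(outer_delegate()) == [1, 2]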
diff --git a/cwltool/workflow_job.py b/cwltool/workflow_job.py
index d18ab6ea89..d49614cb28 100644
--- a/cwltool/workflow_job.py
+++ b/cwltool/workflow_job.py
@@ -46,7 +46,9 @@
from .workflow import Workflow, WorkflowStep
-class WorkflowJobStep(object):
+class WorkflowJobStep:
+ """Generated for each step in Workflow.steps()."""
+
def __init__(self, step: "WorkflowStep") -> None:
"""Initialize this WorkflowJobStep."""
self.step = step
@@ -71,11 +73,12 @@ def job(
_logger.info("[%s] start", self.name)
- for j in self.step.job(joborder, output_callback, runtimeContext):
- yield j
+ yield from self.step.job(joborder, output_callback, runtimeContext)
+
+class ReceiveScatterOutput:
+ """Produced by the scatter generators."""
-class ReceiveScatterOutput(object):
def __init__(
self,
output_callback: ScatterOutputCallbackType,
@@ -438,7 +441,7 @@ def object_from_state(
break
if not found:
raise WorkflowException(
- u"All sources for '%s' are null" % (shortname(original_id))
+ "All sources for '%s' are null" % (shortname(original_id))
)
elif inp["pickValue"] == "the_only_non_null":
found = False
@@ -446,14 +449,14 @@ def object_from_state(
if v is not None:
if found:
raise WorkflowException(
- u"Expected only one source for '%s' to be non-null, got %s"
+ "Expected only one source for '%s' to be non-null, got %s"
% (shortname(original_id), seq)
)
found = True
inputobj[iid] = v
if not found:
raise WorkflowException(
- u"All sources for '%s' are null" % (shortname(original_id))
+ "All sources for '%s' are null" % (shortname(original_id))
)
elif inp["pickValue"] == "all_non_null":
inputobj[iid] = [v for v in seq if v is not None]
@@ -469,7 +472,9 @@ def object_from_state(
return inputobj
-class WorkflowJob(object):
+class WorkflowJob:
+ """Generates steps from the Workflow."""
+
def __init__(self, workflow: "Workflow", runtimeContext: RuntimeContext) -> None:
"""Initialize this WorkflowJob."""
self.workflow = workflow
@@ -628,9 +633,9 @@ def try_make_job(
i["id"]: i["valueFrom"] for i in step.tool["inputs"] if "valueFrom" in i
}
- loadContents = set(
+ loadContents = {
i["id"] for i in step.tool["inputs"] if i.get("loadContents")
- )
+ }
if len(valueFrom) > 0 and not bool(
self.workflow.get_requirement("StepInputExpressionRequirement")[0]
@@ -745,28 +750,27 @@ def valueFromFunc(
else:
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(
- u"[%s] job input %s", step.name, json_dumps(inputobj, indent=4)
+ "[%s] job input %s", step.name, json_dumps(inputobj, indent=4)
)
inputobj = postScatterEval(inputobj)
if inputobj is not None:
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(
- u"[%s] evaluated job input to %s",
+ "[%s] evaluated job input to %s",
step.name,
json_dumps(inputobj, indent=4),
)
jobs = step.job(inputobj, callback, runtimeContext)
else:
- _logger.info(u"[%s] will be skipped", step.name)
+ _logger.info("[%s] will be skipped", step.name)
callback({k["id"]: None for k in outputparms}, "skipped")
step.completed = True
jobs = (_ for _ in ())
step.submitted = True
- for j in jobs:
- yield j
+ yield from jobs
except WorkflowException:
raise
except Exception:
@@ -796,14 +800,10 @@ def job(
runtimeContext = runtimeContext.copy()
runtimeContext.outdir = None
+ debug = runtimeContext.debug
for index, inp in enumerate(self.tool["inputs"]):
- with SourceLine(
- self.tool["inputs"],
- index,
- WorkflowException,
- _logger.isEnabledFor(logging.DEBUG),
- ):
+ with SourceLine(self.tool["inputs"], index, WorkflowException, debug):
inp_id = shortname(inp["id"])
if inp_id in joborder:
self.state[inp["id"]] = WorkflowStateItem(
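The pickValue branches touched above implement CWL v1.2's null-filtering modes; a simplified standalone model of the three behaviors (illustrative only, not the object_from_state implementation):

from typing import Any, List, Optional

def pick_value(seq: List[Optional[Any]], mode: str) -> Any:
    non_null = [v for v in seq if v is not None]
    if mode == "first_non_null":
        if not non_null:
            raise ValueError("All sources are null")
        return non_null[0]
    if mode == "the_only_non_null":
        if len(non_null) != 1:
            raise ValueError("Expected exactly one non-null source")
        return non_null[0]
    if mode == "all_non_null":
        return non_null
    raise ValueError(f"Unknown pickValue: {mode}")

assert pick_value([None, "a", None], "the_only_non_null") == "a"
assert pick_value([None, "a", "b"], "all_non_null") == ["a", "b"]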
diff --git a/mypy.ini b/mypy.ini
index 3a6c4faab6..25ed405f07 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,8 +1,8 @@
[mypy]
-# --strict options as of mypy 0.720
+# --strict options as of mypy 0.800
warn_unused_configs = True
-disallow_subclassing_any = True
disallow_any_generics = True
+disallow_subclassing_any = True
disallow_untyped_calls = True
disallow_untyped_defs = True
disallow_incomplete_defs = True
@@ -12,7 +12,8 @@ no_implicit_optional = True
warn_redundant_casts = True
warn_unused_ignores = True
warn_return_any = True
-implicit_reexport = True
+implicit_reexport = False
+strict_equality = True
[mypy-schema_salad.tests.*]
ignore_errors = True
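Of the newly enabled options, strict_equality is the one most likely to surface latent bugs: it rejects comparisons whose operand types can never overlap. A toy example of what mypy now flags:

def is_answer(value: int) -> bool:
    # With strict_equality = True, mypy reports
    # "Non-overlapping equality check" here, because an int can never
    # equal a str and the comparison is always False:
    return value == "42"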
diff --git a/mypy_requirements.txt b/mypy_requirements.txt
new file mode 100644
index 0000000000..5d241a30ef
--- /dev/null
+++ b/mypy_requirements.txt
@@ -0,0 +1,3 @@
+mypy==0.910
+types-requests
+types-setuptools
diff --git a/release-test.sh b/release-test.sh
index 3908b81f1f..62bbc99168 100755
--- a/release-test.sh
+++ b/release-test.sh
@@ -7,38 +7,43 @@ export LC_ALL=C
package=cwltool
module=cwltool
-slug=${TRAVIS_PULL_REQUEST_SLUG:=common-workflow-language/cwltool}
-repo=https://github.com/${slug}.git
+
+if [ "$GITHUB_ACTIONS" = "true" ]; then
+ # We are running as a GH Action
+ repo=${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git
+ HEAD=${GITHUB_REF}
+else
+ repo=https://github.com/common-workflow-language/cwltool.git
+ HEAD=$(git rev-parse HEAD)
+fi
test_prefix=""
run_tests() {
local mod_loc
- mod_loc=$(pip show ${package} |
+ mod_loc=$(pip show ${package} |
grep ^Location | awk '{print $2}')/${module}
"${test_prefix}"bin/py.test "--ignore=${mod_loc}/schemas/" \
--pyargs -x ${module} -n auto --dist=loadfile
}
-pipver=7.0.2 # minimum required version of pip
-setuptoolsver=24.2.0 # required to generate correct metadata for
- # python_requires
+pipver=20.3b1 # minimum required version of pip for Python 3.9
+setuptoolsver=41.1.0 # required for Python 3.9
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
rm -Rf testenv? || /bin/true
-export HEAD=${TRAVIS_PULL_REQUEST_SHA:-$(git rev-parse HEAD)}
if [ "${RELEASE_SKIP}" != "head" ]
then
- virtualenv testenv1 -p python3
+ python3 -m venv testenv1
# First we test the head
# shellcheck source=/dev/null
source testenv1/bin/activate
rm -Rf testenv1/local
- rm testenv1/lib/python-wheels/setuptools* \
+ rm -f testenv1/lib/python-wheels/setuptools* \
&& pip install --force-reinstall -U pip==${pipver} \
- && pip install setuptools==${setuptoolsver} wheel
- make install-dep
- pip install .
- python setup.py test
+ && pip install setuptools==${setuptoolsver} wheel
+ pip install -rtest-requirements.txt
+ pip install -e .
+ make test
pip uninstall -y ${package} || true; pip uninstall -y ${package} || true; make install
mkdir testenv1/not-${module}
# if there is a subdir named '${module}' py.test will execute tests
@@ -48,50 +53,53 @@ then
test_prefix=../ run_tests; popd
fi
-virtualenv testenv2 -p python3
-virtualenv testenv3 -p python3
-virtualenv testenv4 -p python3
+python3 -m venv testenv2
+python3 -m venv testenv3
+python3 -m venv testenv4
rm -Rf testenv[234]/local
# Secondly we test via pip
-cd testenv2
+pushd testenv2
# shellcheck source=/dev/null
source bin/activate
-rm lib/python-wheels/setuptools* \
+rm -f lib/python-wheels/setuptools* \
&& pip install --force-reinstall -U pip==${pipver} \
&& pip install setuptools==${setuptoolsver} wheel
-pip install -e "git+${repo}@${HEAD}#egg=${package}" #[deps]
-cd src/${package}
-make install-dep
+# The following can fail if you haven't pushed your commits to ${repo}
+pip install -e "git+${repo}@${HEAD}#egg=${package}"
+pushd src/${package}
+pip install -rtest-requirements.txt
make dist
-python setup.py test
+make test
cp dist/${package}*tar.gz ../../../testenv3/
pip uninstall -y ${package} || true; pip uninstall -y ${package} || true; make install
-cd ../.. # no subdir named ${proj} here, safe for py.testing the installed module
+popd # ../.. no subdir named ${module} here, safe for py.testing the installed module
# shellcheck disable=SC2086
run_tests
+popd
-# Is the distribution in testenv2 complete enough to build another
+# Is the source distribution in testenv2 complete enough to build another
# functional distribution?
-cd ../testenv3/
+pushd testenv3/
# shellcheck source=/dev/null
source bin/activate
-rm lib/python-wheels/setuptools* \
+rm -f lib/python-wheels/setuptools* \
&& pip install --force-reinstall -U pip==${pipver} \
&& pip install setuptools==${setuptoolsver} wheel
package_tar=$(find . -name "${package}*tar.gz")
pip install "-r${DIR}/test-requirements.txt"
-pip install "${package_tar}" # [deps]
+pip install "${package_tar}"
mkdir out
tar --extract --directory=out -z -f ${package}*.tar.gz
-cd out/${package}*
-make install-dep
+pushd out/${package}*
make dist
-python setup.py test
+make test
pip uninstall -y ${package} || true; pip uninstall -y ${package} || true; make install
mkdir ../not-${module}
pushd ../not-${module}
# shellcheck disable=SC2086
test_prefix=../../ run_tests; popd
+popd
+popd
diff --git a/requirements.txt b/requirements.txt
old mode 100755
new mode 100644
index 4a6b1c4539..9d66e7b2b9
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,11 +1,13 @@
requests>=2.4.3
-ruamel.yaml>=0.12.4,<=0.16.5
-rdflib>=4.2.2,<4.3
-shellescape>=3.4.1,<3.5
-schema-salad>=7,<8
+ruamel.yaml>=0.15,<0.17.17
+rdflib>=4.2.2,<6.1
+shellescape>=3.4.1,<3.9
+schema-salad>=8.2,<9
prov==1.5.1
-bagit==1.6.4
+bagit==1.8.1
mypy-extensions
-psutil
+psutil>=5.6.6
typing-extensions
coloredlogs
+pydot>=1.4.1
+argcomplete>=1.12.0
diff --git a/setup.cfg b/setup.cfg
index cd4a1c1167..4ead715c27 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,10 +7,6 @@ extend-ignore = E203, W503
[aliases]
test=pytest
-[tool:pytest]
-addopts=--ignore cwltool/schemas --basetemp ./tmp
-testpaths = tests
-
[tool:isort]
multi_line_output = 3
include_trailing_comma = True
diff --git a/setup.py b/setup.py
old mode 100755
new mode 100644
index b62c0ffd78..6e1e098d60
--- a/setup.py
+++ b/setup.py
@@ -2,10 +2,24 @@
"""Setup for the reference implementation of the CWL standards."""
import os
import sys
+import warnings
import setuptools.command.egg_info as egg_info_cmd
from setuptools import setup
+if os.name == "nt":
+ warnings.warn(
+ "The CWL reference runner (cwltool) no longer supports running "
+ "CWL workflows natively on MS Windows as its previous MS Windows "
+ "support was incomplete and untested. Instead, please see "
+ "https://pypi.org/project/cwltool/#ms-windows-users "
+ "for instructions on running cwltool via "
+ "Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
+ "CWL documents, then you can ignore this warning, but please "
+ "consider migrating to https://pypi.org/project/cwl-utils/ "
+ "for your CWL document processing needs."
+ )
+
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, "README.rst")
@@ -76,7 +90,7 @@
setup(
name="cwltool",
- version="3.0",
+ version="3.1",
description="Common workflow language reference implementation",
long_description=open(README).read(),
long_description_content_type="text/x-rst",
@@ -94,12 +108,14 @@
"setuptools",
"requests >= 2.6.1", # >= 2.6.1 to workaround
# https://github.com/ionrock/cachecontrol/issues/137
- "ruamel.yaml >= 0.12.4, <= 0.16.5",
- "rdflib >= 4.2.2, < 4.3.0",
- "shellescape >= 3.4.1, < 3.5",
- "schema-salad >= 7, < 8",
+ "ruamel.yaml >= 0.15, < 0.17.17",
+ "rdflib >= 4.2.2, < 6.1.0",
+ "shellescape >= 3.4.1, < 3.9",
+ # 7.1.20210518142926 or later required due to
+ # https://github.com/common-workflow-language/schema_salad/issues/385
+ "schema-salad >= 8.2, < 9",
"mypy-extensions",
- "psutil",
+ "psutil >= 5.6.6",
"prov == 1.5.1",
"bagit >= 1.6.4",
"typing-extensions",
@@ -108,7 +124,7 @@
"argcomplete",
],
extras_require={
- "deps": ["galaxy-tool-util"],
+ "deps": ["galaxy-tool-util >= 21.1.0"],
"docs": [
"sphinx >= 2.2",
"sphinx-rtd-theme",
@@ -121,7 +137,7 @@
setup_requires=PYTEST_RUNNER,
test_suite="tests",
tests_require=[
- "pytest < 7",
+ "pytest >= 6.2, < 6.3",
"mock >= 2.0.0",
"pytest-mock >= 1.10.0",
"arcp >= 0.2.0",
@@ -141,12 +157,6 @@
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
- "Operating System :: OS Independent",
- "Operating System :: Microsoft :: Windows",
- "Operating System :: Microsoft :: Windows :: Windows 10",
- "Operating System :: Microsoft :: Windows :: Windows 8.1",
- # 'Operating System :: Microsoft :: Windows :: Windows 8', # not tested
- # 'Operating System :: Microsoft :: Windows :: Windows 7', # not tested
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
diff --git a/test-requirements.txt b/test-requirements.txt
index 0a54cd8045..a608d13aa1 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,7 +1,8 @@
-pytest < 7
+pytest >= 6.2, < 6.3
+pytest-xdist
mock >= 2.0.0
pytest-mock >= 1.10.0
pytest-cov
arcp >= 0.2.0
rdflib-jsonld >= 0.4.0
-attrs <19.2.0
+-rrequirements.txt
diff --git a/tests/arg-empty-prefix-separate-false.cwl b/tests/arg-empty-prefix-separate-false.cwl
index e86842fadc..7bee9f1a76 100644
--- a/tests/arg-empty-prefix-separate-false.cwl
+++ b/tests/arg-empty-prefix-separate-false.cwl
@@ -1,3 +1,4 @@
+#!/usr/bin/env cwl-runner
class: CommandLineTool
cwlVersion: v1.0
baseCommand: []
@@ -15,4 +16,4 @@ arguments:
shellQuote: false
valueFrom: "echo"
requirements:
-- class: ShellCommandRequirement
\ No newline at end of file
+- class: ShellCommandRequirement
diff --git a/tests/echo-badposition-expr.cwl b/tests/echo-badposition-expr.cwl
new file mode 100644
index 0000000000..f7d94b4e20
--- /dev/null
+++ b/tests/echo-badposition-expr.cwl
@@ -0,0 +1,28 @@
+#!/usr/bin/env cwl-runner
+
+class: CommandLineTool
+cwlVersion: v1.1
+requirements:
+ InlineJavascriptRequirement: {}
+inputs:
+ one:
+ type: int
+ inputBinding:
+ position: $(self)
+ two:
+ type: int
+ inputBinding:
+ valueFrom: sensation!
+ position: ${return "two";}
+arguments:
+ - position: ${return 2;}
+ valueFrom: singular
+outputs:
+ out:
+ type: string
+ outputBinding:
+ glob: out.txt
+ loadContents: true
+ outputEval: $(self[0].contents)
+baseCommand: echo
+stdout: out.txt
diff --git a/tests/env3.cwl b/tests/env3.cwl
new file mode 100644
index 0000000000..14718fd02a
--- /dev/null
+++ b/tests/env3.cwl
@@ -0,0 +1,9 @@
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.0
+class: CommandLineTool
+inputs: []
+baseCommand: env
+arguments: ["-0"]
+outputs:
+ env:
+ type: stdout
diff --git a/tests/env4.cwl b/tests/env4.cwl
new file mode 100644
index 0000000000..13f988b520
--- /dev/null
+++ b/tests/env4.cwl
@@ -0,0 +1,19 @@
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.0
+class: CommandLineTool
+
+requirements:
+ InitialWorkDirRequirement:
+ listing:
+ - entryname: env0.py
+ entry: |
+ import os
+ for k, v in os.environ.items():
+ print(f"{k}={v}", end="\0")
+
+inputs: []
+baseCommand: python3
+arguments: ["env0.py"]
+outputs:
+ env:
+ type: stdout
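Both env3.cwl (env -0) and env4.cwl (the embedded env0.py) emit the environment as NUL-separated KEY=value records; a sketch of parsing that output back into a dict, assumed to mirror what the test helpers do:

from typing import Dict

def parse_null_separated_env(data: str) -> Dict[str, str]:
    # Split on NUL first, then on the first '=' only, so values that
    # themselves contain '=' or newlines survive intact.
    env: Dict[str, str] = {}
    for record in data.split("\0"):
        if record:
            key, _, value = record.partition("=")
            env[key] = value
    return env

assert parse_null_separated_env("A=1\0B=x=y\0") == {"A": "1", "B": "x=y"}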
diff --git a/tests/env_with_software_req.yml b/tests/env_with_software_req.yml
new file mode 100644
index 0000000000..797fc8b7db
--- /dev/null
+++ b/tests/env_with_software_req.yml
@@ -0,0 +1,7 @@
+cwltool:overrides:
+ env3.cwl:
+ requirements:
+ SoftwareRequirement:
+ packages:
+ - package: 'random-lines'
+ version: '1.0'
diff --git a/tests/iwdr_bad_expr.cwl b/tests/iwdr_bad_expr.cwl
new file mode 100644
index 0000000000..8ba9eb0c02
--- /dev/null
+++ b/tests/iwdr_bad_expr.cwl
@@ -0,0 +1,24 @@
+#!/usr/bin/env cwl-runner
+class: CommandLineTool
+cwlVersion: v1.0
+
+requirements:
+ DockerRequirement:
+ dockerPull: debian:stable-slim
+ InlineJavascriptRequirement: {}
+ InitialWorkDirRequirement:
+ listing: |
+ ${
+ return [{"class": "Directory",
+ "basename": "subdir",
+ "listing": [ inputs.example ]
+ }], 42}
+
+inputs:
+ example: File
+
+outputs:
+ same:
+ type: File
+ outputBinding:
+ glob: subdir/$(inputs.example.basename)
diff --git a/tests/iwdr_dir_literal_real_file.cwl b/tests/iwdr_dir_literal_real_file.cwl
index 6cca7c4a06..105b9b62b5 100644
--- a/tests/iwdr_dir_literal_real_file.cwl
+++ b/tests/iwdr_dir_literal_real_file.cwl
@@ -1,3 +1,4 @@
+#!/usr/bin/env cwl-runner
class: CommandLineTool
cwlVersion: v1.0
diff --git a/tests/secondary-files-bad.cwl b/tests/secondary-files-bad.cwl
new file mode 100644
index 0000000000..8bf3157dce
--- /dev/null
+++ b/tests/secondary-files-bad.cwl
@@ -0,0 +1,36 @@
+#!/usr/bin/env cwl-runner
+
+cwlVersion: v1.1
+
+requirements:
+ - class: InlineJavascriptRequirement
+ - class: ShellCommandRequirement
+
+class: CommandLineTool
+
+inputs:
+ fasta_path:
+ type: File
+ secondaryFiles:
+ - pattern: ^.fastq
+ required: true
+ - pattern: .crai
+ required: "${ return 42 }"
+ - .bai?
+ - pattern: "${ return null }"
+
+outputs:
+ bai_list:
+ type: File
+ outputBinding:
+ glob: lsout
+ secondaryFiles:
+ - .bai?
+ - pattern: "${ return null }"
+ - pattern: .crai
+ required: false
+ - pattern: .idx
+
+baseCommand: ["ls"]
+
+stdout: lsout
diff --git a/tests/secondary-files.cwl b/tests/secondary-files.cwl
index 04849d5d95..3fb913790c 100644
--- a/tests/secondary-files.cwl
+++ b/tests/secondary-files.cwl
@@ -1,6 +1,6 @@
#!/usr/bin/env cwl-runner
-cwlVersion: v1.1.0-dev1
+cwlVersion: v1.1
requirements:
- class: InlineJavascriptRequirement
@@ -33,4 +33,4 @@ outputs:
baseCommand: ["ls"]
-stdout: lsout
\ No newline at end of file
+stdout: lsout
diff --git a/tests/subgraph/1432.cwl b/tests/subgraph/1432.cwl
new file mode 100644
index 0000000000..54d553875a
--- /dev/null
+++ b/tests/subgraph/1432.cwl
@@ -0,0 +1,29 @@
+class: Workflow
+cwlVersion: v1.2
+inputs:
+ - id: step1_in
+ type: string
+ - id: step2_in
+ type: string
+outputs:
+ - id: step1_out
+ type: string
+ outputSource: step1/out
+ - id: step2_out
+ type: string
+ outputSource: step2/out
+steps:
+ - id: step1
+ in:
+ - id: inp
+ source: step1_in
+ out:
+ - id: out
+ run: ../echo.cwl
+ - id: step2
+ in:
+ - id: inp
+ source: step2_in
+ out:
+ - id: out
+ run: ../echo.cwl
diff --git a/tests/subgraph/extract_step2_1432.json b/tests/subgraph/extract_step2_1432.json
new file mode 100644
index 0000000000..00bd87499a
--- /dev/null
+++ b/tests/subgraph/extract_step2_1432.json
@@ -0,0 +1,35 @@
+{
+ "class": "Workflow",
+ "cwlVersion": "v1.2",
+ "id": "subgraph/1432.cwl",
+ "inputs": [
+ {
+ "id": "subgraph/1432.cwl#step2_in",
+ "type": "string"
+ }
+ ],
+ "outputs": [
+ {
+ "id": "subgraph/1432.cwl#step2_out",
+ "outputSource": "subgraph/1432.cwl#step2/out",
+ "type": "string"
+ }
+ ],
+ "steps": [
+ {
+ "id": "subgraph/1432.cwl#step2",
+ "in": [
+ {
+ "id": "subgraph/1432.cwl#step2/inp",
+ "source": "subgraph/1432.cwl#step2_in"
+ }
+ ],
+ "out": [
+ {
+ "id": "subgraph/1432.cwl#step2/out"
+ }
+ ],
+ "run": "echo.cwl"
+ }
+ ]
+}
diff --git a/tests/subgraph/single_step1.json b/tests/subgraph/single_step1.json
new file mode 100644
index 0000000000..8f140addae
--- /dev/null
+++ b/tests/subgraph/single_step1.json
@@ -0,0 +1,23 @@
+{"class": "Workflow",
+ "cwlVersion": "v1.0",
+ "id": "count-lines1-wf.cwl",
+ "inputs": [{"id": "file1", "type": "Any"}],
+ "outputs": [{"id": "output",
+ "outputSource": "count-lines1-wf.cwl#step1/output",
+ "type": "Any"}],
+ "steps": [{"id": "count-lines1-wf.cwl#step1",
+ "in": [{"id": "count-lines1-wf.cwl#step1/file1",
+ "source": "file1"}],
+ "inputs": [{"_tool_entry": {"id": "wc-tool.cwl#file1",
+ "type": "File"},
+ "id": "count-lines1-wf.cwl#step1/file1",
+ "source": "count-lines1-wf.cwl#file1",
+ "type": "File"}],
+ "out": ["count-lines1-wf.cwl#step1/output"],
+ "outputs": [{"_tool_entry": {"id": "wc-tool.cwl#output",
+ "outputBinding": {"glob": "output"},
+ "type": "File"},
+ "id": "count-lines1-wf.cwl#step1/output",
+ "outputBinding": {"glob": "output"},
+ "type": "File"}],
+ "run": "wc-tool.cwl"}]}
diff --git a/tests/subgraph/single_step2.json b/tests/subgraph/single_step2.json
new file mode 100644
index 0000000000..68d2216880
--- /dev/null
+++ b/tests/subgraph/single_step2.json
@@ -0,0 +1,23 @@
+{"class": "Workflow",
+ "cwlVersion": "v1.0",
+ "id": "count-lines1-wf.cwl",
+ "inputs": [{"id": "file1", "type": "Any"}],
+ "outputs": [{"id": "output",
+ "outputSource": "count-lines1-wf.cwl#step2/output",
+ "type": "Any"}],
+ "steps": [{"id": "count-lines1-wf.cwl#step2",
+ "in": [{"id": "count-lines1-wf.cwl#step2/file1",
+ "source": "file1"}],
+ "inputs": [{"_tool_entry": {"id": "parseInt-tool.cwl#file1",
+ "inputBinding": {"loadContents": true},
+ "type": "File"},
+ "id": "count-lines1-wf.cwl#step2/file1",
+ "inputBinding": {"loadContents": true},
+ "source": "count-lines1-wf.cwl#step1/output",
+ "type": "File"}],
+ "out": ["count-lines1-wf.cwl#step2/output"],
+ "outputs": [{"_tool_entry": {"id": "parseInt-tool.cwl#output",
+ "type": "int"},
+ "id": "count-lines1-wf.cwl#step2/output",
+ "type": "int"}],
+ "run": "parseInt-tool.cwl"}]}
diff --git a/tests/subgraph/single_step3.json b/tests/subgraph/single_step3.json
new file mode 100644
index 0000000000..5ef062b352
--- /dev/null
+++ b/tests/subgraph/single_step3.json
@@ -0,0 +1,24 @@
+{"class": "Workflow",
+ "cwlVersion": "v1.0",
+ "id": "count-lines1-wf.cwl",
+ "inputs": [{"id": "file1", "type": "Any"}],
+ "outputs": [{"id": "output",
+ "outputSource": "count-lines1-wf.cwl#step3/output",
+ "type": "Any"}],
+ "steps": [{"id": "count-lines1-wf.cwl#step3",
+ "in": [{"id": "count-lines1-wf.cwl#step3/file1",
+ "source": "file1"}],
+ "inputs": [{"_tool_entry": {"id": "wc-tool.cwl#file1",
+ "type": "File"},
+ "id": "count-lines1-wf.cwl#step3/file1",
+ "source": "count-lines1-wf.cwl#file2",
+ "type": "File"}],
+ "label": "step that is independent of step1 and step2",
+ "out": ["count-lines1-wf.cwl#step3/output"],
+ "outputs": [{"_tool_entry": {"id": "wc-tool.cwl#output",
+ "outputBinding": {"glob": "output"},
+ "type": "File"},
+ "id": "count-lines1-wf.cwl#step3/output",
+ "outputBinding": {"glob": "output"},
+ "type": "File"}],
+ "run": "wc-tool.cwl"}]}
diff --git a/tests/subgraph/single_step4.json b/tests/subgraph/single_step4.json
new file mode 100644
index 0000000000..b8c4979433
--- /dev/null
+++ b/tests/subgraph/single_step4.json
@@ -0,0 +1,24 @@
+{"class": "Workflow",
+ "cwlVersion": "v1.0",
+ "id": "count-lines1-wf.cwl",
+ "inputs": [{"id": "file1", "type": "Any"}],
+ "outputs": [{"id": "output",
+ "outputSource": "count-lines1-wf.cwl#step4/output",
+ "type": "Any"}],
+ "steps": [{"id": "count-lines1-wf.cwl#step4",
+ "in": [{"id": "count-lines1-wf.cwl#step4/file1",
+ "source": "file1"}],
+ "inputs": [{"_tool_entry": {"id": "parseInt-tool.cwl#file1",
+ "inputBinding": {"loadContents": true},
+ "type": "File"},
+ "id": "count-lines1-wf.cwl#step4/file1",
+ "inputBinding": {"loadContents": true},
+ "source": "count-lines1-wf.cwl#step1/output",
+ "type": "File"}],
+ "label": "step that also depends on step1",
+ "out": ["count-lines1-wf.cwl#step4/output"],
+ "outputs": [{"_tool_entry": {"id": "parseInt-tool.cwl#output",
+ "type": "int"},
+ "id": "count-lines1-wf.cwl#step4/output",
+ "type": "int"}],
+ "run": "parseInt-tool.cwl"}]}
diff --git a/tests/subgraph/single_step5.json b/tests/subgraph/single_step5.json
new file mode 100644
index 0000000000..07c91097a0
--- /dev/null
+++ b/tests/subgraph/single_step5.json
@@ -0,0 +1,30 @@
+{"class": "Workflow",
+ "cwlVersion": "v1.0",
+ "id": "count-lines1-wf.cwl",
+ "inputs": [{"id": "file1", "type": "Any"}, {"id": "file3", "type": "Any"}],
+ "outputs": [{"id": "output",
+ "outputSource": "count-lines1-wf.cwl#step5/output",
+ "type": "Any"}],
+ "steps": [{"id": "count-lines1-wf.cwl#step5",
+ "in": [{"id": "count-lines1-wf.cwl#step5/file1", "source": "file1"},
+ {"id": "count-lines1-wf.cwl#step5/file3",
+ "source": "file3"}],
+ "inputs": [{"_tool_entry": {"id": "wc-tool.cwl#file1",
+ "type": "File"},
+ "id": "count-lines1-wf.cwl#step5/file1",
+ "source": "count-lines1-wf.cwl#file1",
+ "type": "File"},
+ {"id": "count-lines1-wf.cwl#step5/file3",
+ "not_connected": true,
+ "source": "count-lines1-wf.cwl#file3",
+ "type": "Any",
+ "used_by_step": false}],
+ "label": "step with two inputs",
+ "out": ["count-lines1-wf.cwl#step5/output"],
+ "outputs": [{"_tool_entry": {"id": "wc-tool.cwl#output",
+ "outputBinding": {"glob": "output"},
+ "type": "File"},
+ "id": "count-lines1-wf.cwl#step5/output",
+ "outputBinding": {"glob": "output"},
+ "type": "File"}],
+ "run": "wc-tool.cwl"}]}
diff --git a/tests/test_content_type.py b/tests/test_content_type.py
new file mode 100644
index 0000000000..edc55d236f
--- /dev/null
+++ b/tests/test_content_type.py
@@ -0,0 +1,25 @@
+from typing import Any
+
+import pydot # type: ignore
+
+from .util import get_main_output
+
+
+def test_content_types(caplog: Any) -> None:
+ for _ in ("js_output.cwl", "js_output_workflow.cwl"):
+ commands = [
+ "https://raw.githubusercontent.com/common-workflow-language/common-workflow-language/main/v1.0/v1.0/test-cwl-out2.cwl",
+ "https://github.com/common-workflow-language/common-workflow-language/blob/main/v1.0/v1.0/empty.json",
+ ]
+ error_code, _, stderr = get_main_output(commands)
+
+ found = False
+ for record in caplog.records:
+ if (
+ record.name == "salad"
+ and "got content-type of 'text/html'" in record.message
+ ):
+ found = True
+ break
+ assert found
+ assert error_code == 1, stderr
diff --git a/tests/test_context.py b/tests/test_context.py
index 2dd517186c..7542160482 100644
--- a/tests/test_context.py
+++ b/tests/test_context.py
@@ -1,14 +1,14 @@
import subprocess
+import sys
from cwltool.context import RuntimeContext
+from cwltool.factory import Factory
-from .util import get_data, get_windows_safe_factory, windows_needs_docker
+from .util import get_data
-@windows_needs_docker
def test_replace_default_stdout_stderr() -> None:
"""Test our ability to replace the default stdout/err."""
- import sys
# break stdout & stderr
original_stdout = sys.stdout
@@ -20,7 +20,7 @@ def test_replace_default_stdout_stderr() -> None:
runtime_context = RuntimeContext()
runtime_context.default_stdout = subprocess.DEVNULL # type: ignore
runtime_context.default_stderr = subprocess.DEVNULL # type: ignore
- factory = get_windows_safe_factory(runtime_context=runtime_context)
+ factory = Factory(None, None, runtime_context)
echo = factory.make(get_data("tests/echo.cwl"))
assert echo(inp="foo") == {"out": "foo\n"}
diff --git a/tests/test_dependencies.py b/tests/test_dependencies.py
index ea1e198314..db9366f7bc 100644
--- a/tests/test_dependencies.py
+++ b/tests/test_dependencies.py
@@ -1,20 +1,23 @@
+"""Tests of satisfying SoftwareRequirement via dependencies."""
import os
-from distutils import spawn
+from pathlib import Path
+from shutil import which
+from types import ModuleType
+from typing import Optional
import pytest
-from cwltool.utils import onWindows
-
-from .util import get_data, get_main_output, needs_docker
+from .util import get_data, get_main_output, get_tool_env, needs_docker
+deps = None # type: Optional[ModuleType]
try:
- from galaxy.tools import deps # type: ignore
+ from galaxy.tool_util import deps
except ImportError:
- deps = None
+ pass
@needs_docker
-@pytest.mark.skipif(not deps, reason="galaxy-lib is not installed")
+@pytest.mark.skipif(not deps, reason="galaxy-tool-util is not installed")
def test_biocontainers() -> None:
wflow = get_data("tests/seqtk_seq.cwl")
job = get_data("tests/seqtk_seq_job.json")
@@ -23,8 +26,7 @@ def test_biocontainers() -> None:
assert error_code == 0
-@pytest.mark.skipif(onWindows(), reason="bioconda currently not working on MS Windows")
-@pytest.mark.skipif(not deps, reason="galaxy-lib is not installed")
+@pytest.mark.skipif(not deps, reason="galaxy-tool-util is not installed")
def test_bioconda() -> None:
wflow = get_data("tests/seqtk_seq.cwl")
job = get_data("tests/seqtk_seq_job.json")
@@ -35,14 +37,14 @@ def test_bioconda() -> None:
assert error_code == 0, stderr
-@pytest.mark.skipif(
- not spawn.find_executable("modulecmd"), reason="modulecmd not installed"
-)
-def test_modules() -> None:
+@pytest.mark.skipif(not deps, reason="galaxy-tool-util is not installed")
+@pytest.mark.skipif(not which("modulecmd"), reason="modulecmd not installed")
+def test_modules(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Do a basic smoke test using environment modules to satisfy a SoftwareRequirement."""
wflow = get_data("tests/random_lines.cwl")
job = get_data("tests/random_lines_job.json")
- os.environ["MODULEPATH"] = os.path.join(
- os.getcwd(), "tests/test_deps_env/modulefiles"
+ monkeypatch.setenv(
+ "MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles")
)
error_code, _, stderr = get_main_output(
[
@@ -55,3 +57,28 @@ def test_modules() -> None:
)
assert error_code == 0, stderr
+
+
+@pytest.mark.skipif(not deps, reason="galaxy-tool-util is not installed")
+@pytest.mark.skipif(not which("modulecmd"), reason="modulecmd not installed")
+def test_modules_environment(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
+ """
+ Check that the environment variables set by a module are being propagated correctly.
+
+ Do so by running `env` as the tool and parsing its output.
+ """
+ monkeypatch.setenv(
+ "MODULEPATH", os.path.join(os.getcwd(), "tests/test_deps_env/modulefiles")
+ )
+ tool_env = get_tool_env(
+ tmp_path,
+ [
+ "--beta-dependency-resolvers-configuration",
+ get_data("tests/test_deps_env_modules_resolvers_conf.yml"),
+ ],
+ get_data("tests/env_with_software_req.yml"),
+ )
+
+ assert tool_env["TEST_VAR_MODULE"] == "environment variable ends in space "
+ tool_path = tool_env["PATH"].split(":")
+ assert get_data("tests/test_deps_env/random-lines/1.0/scripts") in tool_path
diff --git a/tests/test_deps_env/modulefiles/random-lines/1.0 b/tests/test_deps_env/modulefiles/random-lines/1.0
index d3f358ba86..3f76238980 100644
--- a/tests/test_deps_env/modulefiles/random-lines/1.0
+++ b/tests/test_deps_env/modulefiles/random-lines/1.0
@@ -13,3 +13,4 @@ if {![file exists $prefix]} {
}
prepend-path PATH ${prefix}/scripts
+setenv TEST_VAR_MODULE "environment variable ends in space "
diff --git a/tests/test_docker.py b/tests/test_docker.py
index f0399e2b94..57f2075e5b 100644
--- a/tests/test_docker.py
+++ b/tests/test_docker.py
@@ -1,6 +1,6 @@
-from distutils import spawn
-
-import py.path
+"""Tests for docker engine."""
+from pathlib import Path
+from shutil import which
from cwltool.main import main
@@ -8,20 +8,21 @@
@needs_docker
-def test_docker_workflow(tmpdir: py.path.local) -> None:
+def test_docker_workflow(tmp_path: Path) -> None:
+ """Basic test for docker with a CWL Workflow."""
result_code, _, stderr = get_main_output(
[
"--default-container",
"debian",
"--outdir",
- str(tmpdir),
+ str(tmp_path),
get_data("tests/wf/hello-workflow.cwl"),
"--usermessage",
"hello",
]
)
assert "completed success" in stderr
- assert (tmpdir / "response.txt").read_text("utf-8") == "hello"
+ assert (tmp_path / "response.txt").read_text("utf-8") == "hello"
assert result_code == 0
@@ -35,7 +36,7 @@ def test_docker_iwdr() -> None:
"hello",
]
)
- docker_installed = bool(spawn.find_executable("docker"))
+ docker_installed = bool(which("docker"))
if docker_installed:
assert result_code == 0
else:
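The deprecated distutils.spawn.find_executable (distutils is deprecated as of Python 3.10) is swapped for shutil.which here and in test_dependencies.py; for this use the replacement is drop-in:

from shutil import which

# which() returns the executable's full path, or None when absent,
# matching how find_executable was used above:
docker_installed = bool(which("docker"))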
diff --git a/tests/test_docker_warning.py b/tests/test_docker_warning.py
deleted file mode 100644
index 3aec636096..0000000000
--- a/tests/test_docker_warning.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from typing import Any, cast
-
-import pytest
-from ruamel.yaml.comments import CommentedMap
-from schema_salad.sourceline import cmap
-
-from cwltool import command_line_tool
-from cwltool.context import LoadingContext, RuntimeContext
-from cwltool.utils import onWindows, windows_default_container_id
-
-
-@pytest.mark.skip(not onWindows(), reason="MS Windows only") # type: ignore
-def test_default_docker_warning(mocker: Any) -> None:
- """Check warning when default docker Container is used on Windows."""
- mocker.patch("cwltool.command_line_tool._logger")
-
- tool = command_line_tool.CommandLineTool(
- cast(CommentedMap, cmap({"inputs": [], "outputs": []})), LoadingContext()
- )
- tool.make_job_runner(
- RuntimeContext({"find_default_container": lambda x: "frolvlad/alpine-bash"})
- )
-
- command_line_tool._logger.warning.assert_called_with( # type: ignore
- command_line_tool.DEFAULT_CONTAINER_MSG,
- windows_default_container_id,
- windows_default_container_id,
- )
diff --git a/tests/test_empty_input.py b/tests/test_empty_input.py
index dbcc357bd6..fc68600840 100644
--- a/tests/test_empty_input.py
+++ b/tests/test_empty_input.py
@@ -1,19 +1,24 @@
from io import StringIO
+from pathlib import Path
from cwltool.main import main
-from .util import get_data, temp_dir, windows_needs_docker
+from .util import get_data
-@windows_needs_docker
-def test_empty_input() -> None:
+def test_empty_input(tmp_path: Path) -> None:
+ """Affirm that an empty input works."""
empty_json = "{}"
empty_input = StringIO(empty_json)
- with temp_dir() as tmpdir:
- params = ["--outdir", tmpdir, get_data("tests/wf/no-parameters-echo.cwl"), "-"]
+ params = [
+ "--outdir",
+ str(tmp_path),
+ get_data("tests/wf/no-parameters-echo.cwl"),
+ "-",
+ ]
- try:
- assert main(params, stdin=empty_input) == 0
- except SystemExit as err:
- assert err.code == 0
+ try:
+ assert main(params, stdin=empty_input) == 0
+ except SystemExit as err:
+ assert err.code == 0
diff --git a/tests/test_environment.py b/tests/test_environment.py
new file mode 100644
index 0000000000..44b0f76a47
--- /dev/null
+++ b/tests/test_environment.py
@@ -0,0 +1,275 @@
+"""Test passing of environment variables to tools."""
+import os
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Mapping, Union
+
+import pytest
+
+from cwltool.singularity import get_version
+
+from .util import env_accepts_null, get_tool_env, needs_docker, needs_singularity
+
+# None => accept anything, just require the key is present
+# str => string equality
+# Callable => call the function with the value - True => OK, False => fail
+# TODO: maybe add regex?
+Env = Mapping[str, str]
+CheckerTypes = Union[None, str, Callable[[str, Env], bool]]
+EnvChecks = Dict[str, CheckerTypes]
+
+
+def assert_envvar_matches(check: CheckerTypes, k: str, env: Mapping[str, str]) -> None:
+ """Assert that the check is satisfied by the key in the env."""
+ if check is None:
+ pass
+ else:
+ v = env[k]
+ if isinstance(check, str):
+ assert v == check, f'Environment variable {k} == "{v}" != "{check}"'
+ else:
+ assert check(v, env), f'Environment variable {k}="{v}" fails check'
+
+
+def assert_env_matches(
+ checks: EnvChecks, env: Mapping[str, str], allow_unexpected: bool = False
+) -> None:
+ """Assert that all checks are satisfied by the Mapping.
+
+ Optional flag `allow_unexpected` (default = False) will allow the
+ Mapping to contain extra keys which are not checked.
+ """
+ e = dict(env)
+ for k, check in checks.items():
+ assert k in e
+ v = e.pop(k)
+ assert_envvar_matches(check, k, env)
+
+ if not allow_unexpected:
+ # If we have to use env4.cwl, there may be unwanted variables
+ # (see cwltool.env_to_stdout docstrings).
+ # LC_CTYPE if platform has glibc
+ # __CF_USER_TEXT_ENCODING on macOS
+ if not env_accepts_null():
+ e.pop("LC_CTYPE", None)
+ e.pop("__CF_USER_TEXT_ENCODING", None)
+ assert len(e) == 0, f"Unexpected environment variable(s): {', '.join(e.keys())}"
+
+
+class CheckHolder(ABC):
+ """Base class for check factory functions and other data required to parametrize the tests below."""
+
+ @staticmethod
+ @abstractmethod
+ def checks(tmp_prefix: str) -> EnvChecks:
+ """Return a mapping from environment variable names to how to check for correctness."""
+ pass
+
+ # Any flags to pass to cwltool to force use of the correct container
+ flags: List[str]
+
+ # Does the env tool (maybe in our container) accept a `-0` flag?
+ env_accepts_null: bool
+
+
+class NoContainer(CheckHolder):
+ """No containers at all, just run in the host."""
+
+ @staticmethod
+ def checks(tmp_prefix: str) -> EnvChecks:
+ """Create checks."""
+ return {
+ "TMPDIR": lambda v, _: v.startswith(tmp_prefix),
+ "HOME": lambda v, _: v.startswith(tmp_prefix),
+ "PATH": os.environ["PATH"],
+ }
+
+ flags = ["--no-container"]
+ env_accepts_null = env_accepts_null()
+
+
+class Docker(CheckHolder):
+ """Run in a Docker container."""
+
+ @staticmethod
+ def checks(tmp_prefix: str) -> EnvChecks:
+ """Create checks."""
+
+ def HOME(v: str, env: Env) -> bool:
+ # Want /whatever
+ parts = os.path.split(v)
+ return len(parts) == 2 and parts[0] == "/"
+
+ return {
+ "HOME": HOME,
+ "TMPDIR": "/tmp",
+ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HOSTNAME": None,
+ }
+
+ flags = ["--default-container=debian"]
+ env_accepts_null = True
+
+
+class Singularity(CheckHolder):
+ """Run in a Singularity container."""
+
+ @staticmethod
+ def checks(tmp_prefix: str) -> EnvChecks:
+ """Create checks."""
+
+ def PWD(v: str, env: Env) -> bool:
+ return v == env["HOME"]
+
+ ans: EnvChecks = {
+ "HOME": None,
+ "LANG": "C",
+ "LD_LIBRARY_PATH": None,
+ "PATH": None,
+ "PS1": None,
+ "PWD": PWD,
+ "TMPDIR": "/tmp",
+ }
+
+ # Singularity variables appear to be in flux somewhat.
+ version = get_version().split(".")
+ vmajor = int(version[0])
+ assert vmajor == 3, "Tests only work for Singularity 3"
+ vminor = int(version[1])
+ sing_vars: EnvChecks = {
+ "SINGULARITY_CONTAINER": None,
+ "SINGULARITY_NAME": None,
+ }
+ if vminor < 5:
+ sing_vars["SINGULARITY_APPNAME"] = None
+ if vminor >= 5:
+ sing_vars["PROMPT_COMMAND"] = None
+ sing_vars["SINGULARITY_ENVIRONMENT"] = None
+ if vminor == 5:
+ sing_vars["SINGULARITY_INIT"] = "1"
+ elif vminor > 5:
+ sing_vars["SINGULARITY_COMMAND"] = "exec"
+ if vminor >= 7:
+
+ def BIND(v: str, env: Env) -> bool:
+ return v.startswith(tmp_prefix) and v.endswith(":/tmp")
+
+ sing_vars["SINGULARITY_BIND"] = BIND
+
+ ans.update(sing_vars)
+
+ # Singularity automatically passes some variables through, if
+ # they exist. This seems to be constant from 3.1 but isn't
+ # documented (see source /internal/pkg/util/env/clean.go).
+ autopass = (
+ "ALL_PROXY",
+ "FTP_PROXY",
+ "HTTP_PROXY",
+ "HTTPS_PROXY",
+ "NO_PROXY",
+ "TERM",
+ )
+ for vname in autopass:
+ if vname in os.environ:
+ ans[vname] = os.environ[vname]
+
+ return ans
+
+ flags = ["--default-container=debian", "--singularity"]
+ env_accepts_null = True
+
+
+# CRT = container runtime
+CRT_PARAMS = pytest.mark.parametrize(
+ "crt_params",
+ [
+ NoContainer(),
+ pytest.param(Docker(), marks=needs_docker),
+ pytest.param(Singularity(), marks=needs_singularity),
+ ],
+)
+
+
+@CRT_PARAMS
+def test_basic(crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any) -> None:
+ """Test that basic env vars (only) show up."""
+ tmp_prefix = str(tmp_path / "canary")
+ extra_env = {
+ "USEDVAR": "VARVAL",
+ "UNUSEDVAR": "VARVAL",
+ }
+ args = crt_params.flags + [f"--tmpdir-prefix={tmp_prefix}"]
+ env = get_tool_env(
+ tmp_path,
+ args,
+ extra_env=extra_env,
+ monkeypatch=monkeypatch,
+ runtime_env_accepts_null=crt_params.env_accepts_null,
+ )
+ checks = crt_params.checks(tmp_prefix)
+ assert_env_matches(checks, env)
+
+
+@CRT_PARAMS
+def test_preserve_single(
+ crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any
+) -> None:
+ """Test that preserving a single env var works."""
+ tmp_prefix = str(tmp_path / "canary")
+ extra_env = {
+ "USEDVAR": "VARVAL",
+ "UNUSEDVAR": "VARVAL",
+ }
+ args = crt_params.flags + [
+ f"--tmpdir-prefix={tmp_prefix}",
+ "--preserve-environment=USEDVAR",
+ ]
+ env = get_tool_env(
+ tmp_path,
+ args,
+ extra_env=extra_env,
+ monkeypatch=monkeypatch,
+ runtime_env_accepts_null=crt_params.env_accepts_null,
+ )
+ checks = crt_params.checks(tmp_prefix)
+ checks["USEDVAR"] = extra_env["USEDVAR"]
+ assert_env_matches(checks, env)
+
+
+@CRT_PARAMS
+def test_preserve_all(
+ crt_params: CheckHolder, tmp_path: Path, monkeypatch: Any
+) -> None:
+ """Test that preserving all works."""
+ tmp_prefix = str(tmp_path / "canary")
+ extra_env = {
+ "USEDVAR": "VARVAL",
+ "UNUSEDVAR": "VARVAL",
+ }
+ args = crt_params.flags + [
+ f"--tmpdir-prefix={tmp_prefix}",
+ "--preserve-entire-environment",
+ ]
+ env = get_tool_env(
+ tmp_path,
+ args,
+ extra_env=extra_env,
+ monkeypatch=monkeypatch,
+ runtime_env_accepts_null=crt_params.env_accepts_null,
+ )
+ checks = crt_params.checks(tmp_prefix)
+ checks.update(extra_env)
+
+ for vname, val in env.items():
+ try:
+ assert_envvar_matches(checks[vname], vname, env)
+ except KeyError:
+ assert val == os.environ[vname]
+ except AssertionError:
+ if vname == "HOME" or vname == "TMPDIR":
+ # These MUST be OK
+ raise
+ # Other variables can be overridden
+ assert val == os.environ[vname]
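The EnvChecks mapping mixes three checker kinds: None (the key must merely exist), str (exact equality), and a callable predicate over the value and the whole environment. A small use of the helpers defined above, with hypothetical values:

checks: EnvChecks = {
    "HOME": None,  # presence is enough
    "TMPDIR": "/tmp",  # exact match
    "PATH": lambda v, env: "/usr/bin" in v.split(":"),  # predicate
}
assert_env_matches(
    checks,
    {"HOME": "/root", "TMPDIR": "/tmp", "PATH": "/usr/bin:/bin"},
)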
diff --git a/tests/test_examples.py b/tests/test_examples.py
index 65a46627aa..060efecf95 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -1,6 +1,7 @@
import json
import logging
import os
+import re
import stat
import subprocess
import sys
@@ -9,7 +10,6 @@
from typing import Any, Dict, List, Union, cast
from urllib.parse import urlparse
-import py.path
import pydot # type: ignore
import pytest
from ruamel.yaml.comments import CommentedMap, CommentedSeq
@@ -27,16 +27,9 @@
from cwltool.main import main
from cwltool.process import CWL_IANA
from cwltool.sandboxjs import JavascriptException
-from cwltool.utils import CWLObjectType, dedup, onWindows
-
-from .util import (
- get_data,
- get_main_output,
- get_windows_safe_factory,
- needs_docker,
- temp_dir,
- windows_needs_docker,
-)
+from cwltool.utils import CWLObjectType, dedup
+
+from .util import get_data, get_main_output, needs_docker, working_directory
sys.argv = [""]
@@ -257,11 +250,8 @@ def test_parameter_to_expression_interpolate_escapebehavior(
@pytest.mark.parametrize("pattern", interpolate_bad_parameters)
def test_expression_interpolate_failures(pattern: str) -> None:
result = None
- try:
+ with pytest.raises(JavascriptException):
result = expr.interpolate(pattern, interpolate_input)
- except JavascriptException:
- return
- assert False, 'Should have produced a JavascriptException, got "{}".'.format(result)
interpolate_escapebehavior = (
@@ -299,9 +289,8 @@ def test_expression_interpolate_escapebehavior(
)
-@windows_needs_docker
def test_factory() -> None:
- factory = get_windows_safe_factory()
+ factory = cwltool.factory.Factory()
echo = factory.make(get_data("tests/echo.cwl"))
assert echo(inp="foo") == {"out": "foo\n"}
@@ -530,6 +519,23 @@ def test_trick_scandeps() -> None:
assert json.loads(stream.getvalue())["secondaryFiles"][0]["location"][:2] != "_:"
+def test_scandeps_defaults_with_secondaryfiles() -> None:
+ stream = StringIO()
+
+ main(
+ [
+ "--print-deps",
+ "--relative-deps=cwd",
+ "--debug",
+ get_data("tests/wf/trick_defaults2.cwl"),
+ ],
+ stdout=stream,
+ )
+ assert json.loads(stream.getvalue())["secondaryFiles"][0]["secondaryFiles"][0][
+ "location"
+ ].endswith(os.path.join("tests", "wf", "indir1"))
+
+
def test_input_deps() -> None:
stream = StringIO()
@@ -882,6 +888,33 @@ def test_separate_without_prefix() -> None:
factory.make(get_data("tests/wf/separate_without_prefix.cwl"))()
+def test_glob_expr_error(tmp_path: Path) -> None:
+ """Better glob expression error."""
+ error_code, _, stderr = get_main_output(
+ [get_data("tests/wf/1496.cwl"), "--index", str(tmp_path)]
+ )
+ assert error_code != 0
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "Resolved glob patterns must be strings" in stderr
+
+
+def test_format_expr_error() -> None:
+ """Better format expression error."""
+ error_code, _, stderr = get_main_output(
+ [
+ get_data("tests/wf/bad_formattest.cwl"),
+ get_data("tests/wf/formattest-job.json"),
+ ]
+ )
+ assert error_code != 0
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert (
+ "An expression in the 'format' field must evaluate to a string, or list "
+ "of strings. However a non-string item was received: '42' of "
+ "type ''." in stderr
+ )
+
+
def test_static_checker() -> None:
# check that the static checker raises exception when a source type
# mismatches its sink type.
@@ -1003,16 +1036,12 @@ def test_print_dot() -> None:
assert main(["--print-dot", cwl_path], stdout=stdout) == 0
computed_dot = pydot.graph_from_dot_data(stdout.getvalue())[0]
computed_edges = sorted(
- [
- (urlparse(source).fragment, urlparse(target).fragment)
- for source, target in computed_dot.obj_dict["edges"]
- ]
+ (urlparse(source).fragment, urlparse(target).fragment)
+ for source, target in computed_dot.obj_dict["edges"]
)
expected_edges = sorted(
- [
- (urlparse(source).fragment, urlparse(target).fragment)
- for source, target in expected_dot.obj_dict["edges"]
- ]
+ (urlparse(source).fragment, urlparse(target).fragment)
+ for source, target in expected_dot.obj_dict["edges"]
)
assert computed_edges == expected_edges
@@ -1053,80 +1082,79 @@ def test_no_js_console(factor: str) -> None:
@needs_docker
@pytest.mark.parametrize("factor", test_factors)
-def test_cid_file_dir(tmpdir: py.path.local, factor: str) -> None:
+def test_cid_file_dir(tmp_path: Path, factor: str) -> None:
+ """Test --cidfile-dir option works."""
test_file = "cache_test_workflow.cwl"
- cwd = tmpdir.chdir()
- commands = factor.split()
- commands.extend(["--cidfile-dir", str(tmpdir), get_data("tests/wf/" + test_file)])
- error_code, stdout, stderr = get_main_output(commands)
- assert "completed success" in stderr
- assert error_code == 0
- cidfiles_count = sum(1 for _ in tmpdir.visit(fil="*"))
- assert cidfiles_count == 2
- cwd.chdir()
- tmpdir.remove(ignore_errors=True)
+ with working_directory(tmp_path):
+ commands = factor.split()
+ commands.extend(
+ ["--cidfile-dir", str(tmp_path), get_data("tests/wf/" + test_file)]
+ )
+ error_code, stdout, stderr = get_main_output(commands)
+ assert "completed success" in stderr
+ assert error_code == 0
+ cidfiles_count = sum(1 for _ in tmp_path.glob("**/*"))
+ assert cidfiles_count == 2
@needs_docker
@pytest.mark.parametrize("factor", test_factors)
-def test_cid_file_dir_arg_is_file_instead_of_dir(
- tmpdir: py.path.local, factor: str
-) -> None:
+def test_cid_file_dir_arg_is_file_instead_of_dir(tmp_path: Path, factor: str) -> None:
+ """Test --cidfile-dir with a file produces the correct error."""
test_file = "cache_test_workflow.cwl"
- bad_cidfile_dir = str(tmpdir.ensure("cidfile-dir-actually-a-file"))
+ bad_cidfile_dir = tmp_path / "cidfile-dir-actually-a-file"
+ bad_cidfile_dir.touch()
commands = factor.split()
commands.extend(
- ["--cidfile-dir", bad_cidfile_dir, get_data("tests/wf/" + test_file)]
+ ["--cidfile-dir", str(bad_cidfile_dir), get_data("tests/wf/" + test_file)]
)
error_code, _, stderr = get_main_output(commands)
assert "is not a directory, please check it first" in stderr, stderr
assert error_code == 2 or error_code == 1, stderr
- tmpdir.remove(ignore_errors=True)
@needs_docker
@pytest.mark.parametrize("factor", test_factors)
-def test_cid_file_non_existing_dir(tmpdir: py.path.local, factor: str) -> None:
+def test_cid_file_non_existing_dir(tmp_path: Path, factor: str) -> None:
+ """Test that --cachedir with a bad path should produce a specific error."""
test_file = "cache_test_workflow.cwl"
- bad_cidfile_dir = str(tmpdir.join("cidfile-dir-badpath"))
+ bad_cidfile_dir = tmp_path / "cidfile-dir-badpath"
commands = factor.split()
commands.extend(
[
"--record-container-id",
"--cidfile-dir",
- bad_cidfile_dir,
+ str(bad_cidfile_dir),
get_data("tests/wf/" + test_file),
]
)
error_code, _, stderr = get_main_output(commands)
assert "directory doesn't exist, please create it first" in stderr, stderr
assert error_code == 2 or error_code == 1, stderr
- tmpdir.remove(ignore_errors=True)
@needs_docker
@pytest.mark.parametrize("factor", test_factors)
-def test_cid_file_w_prefix(tmpdir: py.path.local, factor: str) -> None:
+def test_cid_file_w_prefix(tmp_path: Path, factor: str) -> None:
+ """Test that --cidfile-prefix works."""
test_file = "cache_test_workflow.cwl"
- cwd = tmpdir.chdir()
- try:
- commands = factor.split()
- commands.extend(
- [
- "--record-container-id",
- "--cidfile-prefix=pytestcid",
- get_data("tests/wf/" + test_file),
- ]
- )
- error_code, stdout, stderr = get_main_output(commands)
- finally:
- listing = tmpdir.listdir()
- cwd.chdir()
- cidfiles_count = sum(1 for _ in tmpdir.visit(fil="pytestcid*"))
- tmpdir.remove(ignore_errors=True)
+ with working_directory(tmp_path):
+ try:
+ commands = factor.split()
+ commands.extend(
+ [
+ "--record-container-id",
+ "--cidfile-prefix=pytestcid",
+ get_data("tests/wf/" + test_file),
+ ]
+ )
+ error_code, stdout, stderr = get_main_output(commands)
+ finally:
+ listing = tmp_path.iterdir()
+ cidfiles_count = sum(1 for _ in tmp_path.glob("**/pytestcid*"))
assert "completed success" in stderr
assert error_code == 0
- assert cidfiles_count == 2, "{}/n{}".format(listing, stderr)
+ assert cidfiles_count == 2, f"{list(listing)}\n{stderr}"
@needs_docker
@@ -1139,7 +1167,7 @@ def test_secondary_files_v1_1(factor: str) -> None:
commands = factor.split()
commands.extend(
[
- "--enable-dev",
+ "--debug",
get_data(os.path.join("tests", test_file)),
get_data(os.path.join("tests", test_job_file)),
]
@@ -1155,45 +1183,65 @@ def test_secondary_files_v1_1(factor: str) -> None:
@needs_docker
@pytest.mark.parametrize("factor", test_factors)
-def test_secondary_files_v1_0(factor: str) -> None:
+def test_secondary_files_bad_v1_1(factor: str) -> None:
+ """Affirm the correct error message for a bad secondaryFiles expression."""
+ test_file = "secondary-files-bad.cwl"
+ test_job_file = "secondary-files-job.yml"
+ commands = factor.split()
+ commands.extend(
+ [
+ get_data(os.path.join("tests", test_file)),
+ get_data(os.path.join("tests", test_job_file)),
+ ]
+ )
+ error_code, _, stderr = get_main_output(commands)
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert (
+ "The result of a expression in the field 'required' must be a bool "
+ "or None, not a ." in stderr
+ ), stderr
+ assert error_code == 1
+
+
+@needs_docker
+@pytest.mark.parametrize("factor", test_factors)
+def test_secondary_files_v1_0(tmp_path: Path, factor: str) -> None:
+ """Test plain strings under "secondaryFiles"."""
test_file = "secondary-files-string-v1.cwl"
test_job_file = "secondary-files-job.yml"
- try:
- old_umask = os.umask(stat.S_IWOTH) # test run with umask 002
- commands = factor.split()
- commands.extend(
- [
- get_data(os.path.join("tests", test_file)),
- get_data(os.path.join("tests", test_job_file)),
- ]
- )
- error_code, _, stderr = get_main_output(commands)
- finally:
- # 664 in octal, '-rw-rw-r--'
- assert stat.S_IMODE(os.stat("lsout").st_mode) == 436
- os.umask(old_umask) # revert back to original umask
+ commands = factor.split()
+ commands.extend(
+ [
+ "--outdir",
+ str(tmp_path),
+ get_data(os.path.join("tests", test_file)),
+ get_data(os.path.join("tests", test_job_file)),
+ ]
+ )
+ error_code, _, stderr = get_main_output(commands)
assert "completed success" in stderr
assert error_code == 0
@needs_docker
@pytest.mark.parametrize("factor", test_factors)
-def test_wf_without_container(tmpdir: py.path.local, factor: str) -> None:
+def test_wf_without_container(tmp_path: Path, factor: str) -> None:
+ """Confirm that we can run a workflow without a container."""
test_file = "hello-workflow.cwl"
- with temp_dir("cwltool_cache") as cache_dir:
- commands = factor.split()
- commands.extend(
- [
- "--cachedir",
- cache_dir,
- "--outdir",
- str(tmpdir),
- get_data("tests/wf/" + test_file),
- "--usermessage",
- "hello",
- ]
- )
- error_code, _, stderr = get_main_output(commands)
+ cache_dir = str(tmp_path / "cwltool_cache")
+ commands = factor.split()
+ commands.extend(
+ [
+ "--cachedir",
+ cache_dir,
+ "--outdir",
+ str(tmp_path / "outdir"),
+ get_data("tests/wf/" + test_file),
+ "--usermessage",
+ "hello",
+ ]
+ )
+ error_code, _, stderr = get_main_output(commands)
assert "completed success" in stderr
assert error_code == 0
@@ -1201,29 +1249,30 @@ def test_wf_without_container(tmpdir: py.path.local, factor: str) -> None:
@needs_docker
@pytest.mark.parametrize("factor", test_factors)
-def test_issue_740_fixed(factor: str) -> None:
+def test_issue_740_fixed(tmp_path: Path, factor: str) -> None:
+ """Confirm that re-running a particular workflow with caching suceeds."""
test_file = "cache_test_workflow.cwl"
- with temp_dir("cwltool_cache") as cache_dir:
- commands = factor.split()
- commands.extend(["--cachedir", cache_dir, get_data("tests/wf/" + test_file)])
- error_code, _, stderr = get_main_output(commands)
+ cache_dir = str(tmp_path / "cwltool_cache")
+ commands = factor.split()
+ commands.extend(["--cachedir", cache_dir, get_data("tests/wf/" + test_file)])
+ error_code, _, stderr = get_main_output(commands)
- assert "completed success" in stderr
- assert error_code == 0
+ assert "completed success" in stderr
+ assert error_code == 0
- commands = factor.split()
- commands.extend(["--cachedir", cache_dir, get_data("tests/wf/" + test_file)])
- error_code, _, stderr = get_main_output(commands)
+ commands = factor.split()
+ commands.extend(["--cachedir", cache_dir, get_data("tests/wf/" + test_file)])
+ error_code, _, stderr = get_main_output(commands)
- assert "Output of job will be cached in" not in stderr
- assert error_code == 0, stderr
+ assert "Output of job will be cached in" not in stderr
+ assert error_code == 0, stderr
@needs_docker
def test_compute_checksum() -> None:
runtime_context = RuntimeContext()
runtime_context.compute_checksum = True
- runtime_context.use_container = onWindows()
+ runtime_context.use_container = False
factory = cwltool.factory.Factory(runtime_context=runtime_context)
echo = factory.make(get_data("tests/wf/cat-tool.cwl"))
output = echo(
@@ -1236,9 +1285,60 @@ def test_compute_checksum() -> None:
assert result["checksum"] == "sha1$327fc7aedf4f6b69a42a7c8b808dc5a7aff61376"
+def test_bad_stdin_expr_error() -> None:
+ """Confirm that a bad stdin expression gives a useful error."""
+ error_code, _, stderr = get_main_output(
+ [
+ get_data("tests/wf/bad-stdin-expr.cwl"),
+ "--file1",
+ get_data("tests/wf/whale.txt"),
+ ]
+ )
+ assert error_code == 1
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert (
+ "'stdin' expression must return a string or null. Got '1111' for '$(inputs.file1.size)'."
+ in stderr
+ )
+
+
+def test_bad_stderr_expr_error() -> None:
+ """Confirm that a bad stderr expression gives a useful error."""
+ error_code, _, stderr = get_main_output(
+ [
+ get_data("tests/wf/bad-stderr-expr.cwl"),
+ "--file1",
+ get_data("tests/wf/whale.txt"),
+ ]
+ )
+ assert error_code == 1
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert (
+ "'stderr' expression must return a string. Got '1111' for '$(inputs.file1.size)'."
+ in stderr
+ )
+
+
+def test_bad_stdout_expr_error() -> None:
+ """Confirm that a bad stdout expression gives a useful error."""
+ error_code, _, stderr = get_main_output(
+ [
+ get_data("tests/wf/bad-stdout-expr.cwl"),
+ "--file1",
+ get_data("tests/wf/whale.txt"),
+ ]
+ )
+ assert error_code == 1
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert (
+ "'stdout' expression must return a string. Got '1111' for '$(inputs.file1.size)'."
+ in stderr
+ )
+
+
@needs_docker
@pytest.mark.parametrize("factor", test_factors)
-def test_no_compute_chcksum(tmpdir: py.path.local, factor: str) -> None:
+def test_no_compute_chcksum(tmp_path: Path, factor: str) -> None:
test_file = "tests/wf/wc-tool.cwl"
job_file = "tests/wf/wc-job.json"
commands = factor.split()
@@ -1246,7 +1346,7 @@ def test_no_compute_chcksum(tmpdir: py.path.local, factor: str) -> None:
[
"--no-compute-checksum",
"--outdir",
- str(tmpdir),
+ str(tmp_path),
get_data(test_file),
get_data(job_file),
]
@@ -1257,7 +1357,6 @@ def test_no_compute_chcksum(tmpdir: py.path.local, factor: str) -> None:
assert "checksum" not in stdout
-@pytest.mark.skipif(onWindows(), reason="udocker is Linux/macOS only")
@pytest.mark.parametrize("factor", test_factors)
def test_bad_userspace_runtime(factor: str) -> None:
test_file = "tests/wf/wc-tool.cwl"
@@ -1276,7 +1375,6 @@ def test_bad_userspace_runtime(factor: str) -> None:
assert error_code == 1
-@windows_needs_docker
@pytest.mark.parametrize("factor", test_factors)
def test_bad_basecommand(factor: str) -> None:
test_file = "tests/wf/missing-tool.cwl"
@@ -1309,7 +1407,19 @@ def test_v1_0_position_expression(factor: str) -> None:
assert error_code == 1
-@windows_needs_docker
+@pytest.mark.parametrize("factor", test_factors)
+def test_v1_1_position_badexpression(factor: str) -> None:
+ """Test for the correct error for a bad position expression."""
+ test_file = "tests/echo-badposition-expr.cwl"
+ test_job = "tests/echo-position-expr-job.yml"
+ commands = factor.split()
+ commands.extend(["--debug", get_data(test_file), get_data(test_job)])
+ error_code, _, stderr = get_main_output(commands)
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "expressions must evaluate to an int" in stderr, stderr
+ assert error_code == 1
+
+
@pytest.mark.parametrize("factor", test_factors)
def test_optional_numeric_output_0(factor: str) -> None:
test_file = "tests/wf/optional-numerical-output-0.cwl"
@@ -1323,7 +1433,6 @@ def test_optional_numeric_output_0(factor: str) -> None:
@pytest.mark.parametrize("factor", test_factors)
-@windows_needs_docker
def test_env_filtering(factor: str) -> None:
test_file = "tests/env.cwl"
commands = factor.split()
@@ -1353,9 +1462,7 @@ def test_env_filtering(factor: str) -> None:
assert "completed success" in stderr, (error_code, stdout, stderr)
assert error_code == 0, (error_code, stdout, stderr)
- if onWindows():
- target = 5
- elif sh_name == "dash":
+ if sh_name == "dash":
target = 4
else: # bash adds "SHLVL" and "_" environment variables
target = 6
@@ -1369,7 +1476,6 @@ def test_env_filtering(factor: str) -> None:
assert result == target, (error_code, sh_name, sh_name_err, details, stdout, stderr)
-@windows_needs_docker
def test_v1_0_arg_empty_prefix_separate_false() -> None:
test_file = "tests/arg-empty-prefix-separate-false.cwl"
error_code, stdout, stderr = get_main_output(
@@ -1379,22 +1485,86 @@ def test_v1_0_arg_empty_prefix_separate_false() -> None:
assert error_code == 0
-def test_scatter_output_filenames(tmpdir: py.path.local) -> None:
+def test_scatter_output_filenames(tmp_path: Path) -> None:
"""If a scatter step produces identically named output then confirm that the final output is renamed correctly."""
- cwd = tmpdir.chdir()
- rtc = RuntimeContext()
- rtc.outdir = str(cwd)
- factory = cwltool.factory.Factory(runtime_context=rtc)
- output_names = ["output.txt", "output.txt_2", "output.txt_3"]
- scatter_workflow = factory.make(get_data("tests/scatter_numbers.cwl"))
- result = scatter_workflow(range=3)
- assert isinstance(result, dict)
- assert "output" in result
+ cwd = Path.cwd()
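+    # Capture the original working directory as the output target before
+    # switching into tmp_path for the run itself.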
+ with working_directory(tmp_path):
+ rtc = RuntimeContext()
+ rtc.outdir = str(cwd)
+ factory = cwltool.factory.Factory(runtime_context=rtc)
+ output_names = ["output.txt", "output.txt_2", "output.txt_3"]
+ scatter_workflow = factory.make(get_data("tests/scatter_numbers.cwl"))
+ result = scatter_workflow(range=3)
+ assert isinstance(result, dict)
+ assert "output" in result
+
+ locations = sorted(element["location"] for element in result["output"])
+
+ assert (
+ locations[0].endswith("output.txt")
+ and locations[1].endswith("output.txt_2")
+ and locations[2].endswith("output.txt_3")
+ ), f"Locations {locations} do not end with {output_names}"
+
+
+def test_malformed_hints() -> None:
+ """Confirm that empty hints section is caught."""
+ factory = cwltool.factory.Factory()
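+    # The \s* gaps in the pattern tolerate the line wrapping that the
+    # validator applies to long error messages.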
+ with pytest.raises(
+ ValidationException,
+ match=r".*wc-tool-bad-hints\.cwl:6:1: If 'hints' is\s*present\s*then\s*it\s*must\s*be\s*a\s*list.*",
+ ):
+ factory.make(get_data("tests/wc-tool-bad-hints.cwl"))
+
+
+def test_malformed_reqs() -> None:
+ """Confirm that empty reqs section is caught."""
+ factory = cwltool.factory.Factory()
+ with pytest.raises(
+ ValidationException,
+ match=r".*wc-tool-bad-reqs\.cwl:6:1: If 'requirements' is\s*present\s*then\s*it\s*must\s*be\s*a\s*list.*",
+ ):
+ factory.make(get_data("tests/wc-tool-bad-reqs.cwl"))
- locations = sorted([element["location"] for element in result["output"]])
+def test_arguments_self() -> None:
+ """Confirm that $(self) works in the arguments list."""
+ factory = cwltool.factory.Factory()
+ check = factory.make(get_data("tests/wf/paramref_arguments_self.cwl"))
+ outputs = cast(Dict[str, Any], check())
+ assert "self_review" in outputs
+ assert len(outputs) == 1
assert (
- locations[0].endswith("output.txt")
- and locations[1].endswith("output.txt_2")
- and locations[2].endswith("output.txt_3")
- ), "Locations {} do not end with {}".format(locations, output_names)
+ outputs["self_review"]["checksum"]
+ == "sha1$724ba28f4a9a1b472057ff99511ed393a45552e1"
+ )
+
+
+def test_bad_timelimit_expr() -> None:
+ """Confirm error message for bad timelimit expression."""
+ err_code, _, stderr = get_main_output(
+ [
+ get_data("tests/wf/bad_timelimit.cwl"),
+ ]
+ )
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert (
+ "'timelimit' expression must evaluate to a long/int. "
+ "Got '42' for expression '${return \"42\";}" in stderr
+ )
+ assert err_code == 1
+
+
+def test_bad_networkaccess_expr() -> None:
+ """Confirm error message for bad networkaccess expression."""
+ err_code, _, stderr = get_main_output(
+ [
+ get_data("tests/wf/bad_networkaccess.cwl"),
+ ]
+ )
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert (
+ "'networkAccess' expression must evaluate to a bool. "
+ "Got '42' for expression '${return 42;}" in stderr
+ )
+ assert err_code == 1
diff --git a/tests/test_ext.py b/tests/test_ext.py
index 8959a252a7..1eb4091e5a 100644
--- a/tests/test_ext.py
+++ b/tests/test_ext.py
@@ -1,14 +1,14 @@
import os
import re
from io import StringIO
+from pathlib import Path
-import py.path
import pytest
import cwltool.process
from cwltool.main import main
-from .util import get_data, needs_docker, temp_dir, windows_needs_docker
+from .util import get_data, needs_docker
@needs_docker
@@ -31,20 +31,14 @@ def test_listing_deep() -> None:
@needs_docker
-def test_cwltool_options() -> None:
- try:
- opt = os.environ.get("CWLTOOL_OPTIONS")
- os.environ["CWLTOOL_OPTIONS"] = "--enable-ext"
- params = [
- get_data("tests/wf/listing_deep.cwl"),
- get_data("tests/listing-job.yml"),
- ]
- assert main(params) == 0
- finally:
- if opt is not None:
- os.environ["CWLTOOL_OPTIONS"] = opt
- else:
- del os.environ["CWLTOOL_OPTIONS"]
+def test_cwltool_options(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Check setting options via environment variable."""
+ monkeypatch.setenv("CWLTOOL_OPTIONS", "--enable-ext")
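+    # monkeypatch undoes the setenv at teardown, replacing the manual
+    # try/finally save-and-restore of CWLTOOL_OPTIONS.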
+ params = [
+ get_data("tests/wf/listing_deep.cwl"),
+ get_data("tests/listing-job.yml"),
+ ]
+ assert main(params) == 0
@needs_docker
@@ -90,162 +84,168 @@ def test_listing_v1_1() -> None:
@needs_docker
-def test_double_overwrite(tmpdir: py.path.local) -> None:
- with temp_dir() as tmp:
- tmp_name = os.path.join(tmp, "value")
+def test_double_overwrite(tmp_path: Path) -> None:
+ """Test that overwriting an input using cwltool:InplaceUpdateRequirement works."""
+ tmp_name = str(tmp_path / "value")
- before_value, expected_value = "1", "3"
+ before_value, expected_value = "1", "3"
- with open(tmp_name, "w") as f:
- f.write(before_value)
+ with open(tmp_name, "w") as f:
+ f.write(before_value)
- assert (
- main(
- [
- "--enable-ext",
- "--outdir",
- str(tmpdir),
- get_data("tests/wf/mut2.cwl"),
- "-a",
- tmp_name,
- ]
- )
- == 0
+ assert (
+ main(
+ [
+ "--enable-ext",
+ "--outdir",
+ str(tmp_path / "outdir"),
+ get_data("tests/wf/mut2.cwl"),
+ "-a",
+ tmp_name,
+ ]
)
+ == 0
+ )
- with open(tmp_name, "r") as f:
- actual_value = f.read()
+ with open(tmp_name) as f:
+ actual_value = f.read()
- assert actual_value == expected_value
+ assert actual_value == expected_value
@needs_docker
-def test_disable_file_overwrite_without_ext() -> None:
- with temp_dir() as tmp:
- with temp_dir() as out:
- tmp_name = os.path.join(tmp, "value")
- out_name = os.path.join(out, "value")
-
- before_value, expected_value = "1", "2"
-
- with open(tmp_name, "w") as f:
- f.write(before_value)
-
- assert (
- main(
- [
- "--outdir",
- out,
- get_data("tests/wf/updateval.cwl"),
- "-r",
- tmp_name,
- ]
- )
- == 0
- )
-
- with open(tmp_name, "r") as f:
- tmp_value = f.read()
- with open(out_name, "r") as f:
- out_value = f.read()
-
- assert tmp_value == before_value
- assert out_value == expected_value
+def test_disable_file_overwrite_without_ext(tmp_path: Path) -> None:
+ """Test that overwriting an input using an unprefixed InplaceUpdateRequirement works."""
+ tmpdir = tmp_path / "tmp"
+ tmpdir.mkdir()
+ tmp_name = tmpdir / "value"
+ outdir = tmp_path / "out"
+ outdir.mkdir()
+ out_name = outdir / "value"
+ before_value, expected_value = "1", "2"
+
+ with open(tmp_name, "w") as f:
+ f.write(before_value)
+ assert (
+ main(
+ [
+ "--outdir",
+ str(outdir),
+ get_data("tests/wf/updateval.cwl"),
+ "-r",
+ str(tmp_name),
+ ]
+ )
+ == 0
+ )
-@needs_docker
-def test_disable_dir_overwrite_without_ext() -> None:
- with temp_dir() as tmp:
- with temp_dir() as out:
-
- assert (
- main(["--outdir", out, get_data("tests/wf/updatedir.cwl"), "-r", tmp])
- == 0
- )
+ with open(tmp_name) as f:
+ tmp_value = f.read()
+ with open(out_name) as f:
+ out_value = f.read()
- assert not os.listdir(tmp)
- assert os.listdir(out)
+ assert tmp_value == before_value
+ assert out_value == expected_value
@needs_docker
-def test_disable_file_creation_in_outdir_with_ext() -> None:
- with temp_dir() as tmp:
- with temp_dir() as out:
+def test_disable_dir_overwrite_without_ext(tmp_path: Path) -> None:
+ """Test that we can write into a "writable" input Directory w/o ext."""
+ tmp = tmp_path / "tmp"
+ out = tmp_path / "outdir"
+ tmp.mkdir()
+ out.mkdir()
+ assert (
+ main(["--outdir", str(out), get_data("tests/wf/updatedir.cwl"), "-r", str(tmp)])
+ == 0
+ )
- tmp_name = os.path.join(tmp, "value")
- out_name = os.path.join(out, "value")
+ assert not os.listdir(tmp)
+ assert os.listdir(out)
- before_value, expected_value = "1", "2"
- with open(tmp_name, "w") as f:
- f.write(before_value)
+@needs_docker
+def test_disable_file_creation_in_outdir_with_ext(tmp_path: Path) -> None:
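+    """Confirm that an inplace file update with --leave-outputs makes no copy in outdir."""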
+ tmp = tmp_path / "tmp"
+ tmp.mkdir()
+ out = tmp_path / "outdir"
+ tmp_name = tmp / "value"
+ out_name = out / "value"
- params = [
- "--enable-ext",
- "--leave-outputs",
- "--outdir",
- out,
- get_data("tests/wf/updateval_inplace.cwl"),
- "-r",
- tmp_name,
- ]
- assert main(params) == 0
+ before_value, expected_value = "1", "2"
+
+ with open(tmp_name, "w") as f:
+ f.write(before_value)
+
+ params = [
+ "--enable-ext",
+ "--leave-outputs",
+ "--outdir",
+ str(out),
+ get_data("tests/wf/updateval_inplace.cwl"),
+ "-r",
+ str(tmp_name),
+ ]
+ assert main(params) == 0
- with open(tmp_name, "r") as f:
- tmp_value = f.read()
+ with open(tmp_name) as f:
+ tmp_value = f.read()
- assert tmp_value == expected_value
- assert not os.path.exists(out_name)
+ assert tmp_value == expected_value
+ assert not out_name.exists()
@needs_docker
-def test_disable_dir_creation_in_outdir_with_ext() -> None:
- with temp_dir() as tmp:
- with temp_dir() as out:
- params = [
- "--enable-ext",
- "--leave-outputs",
- "--outdir",
- out,
- get_data("tests/wf/updatedir_inplace.cwl"),
- "-r",
- tmp,
- ]
- assert main(params) == 0
+def test_disable_dir_creation_in_outdir_with_ext(tmp_path: Path) -> None:
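+    """Confirm that an inplace directory update with --leave-outputs leaves outdir empty."""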
+ tmp = tmp_path / "tmp"
+ tmp.mkdir()
+ out = tmp_path / "outdir"
+ out.mkdir()
+ params = [
+ "--enable-ext",
+ "--leave-outputs",
+ "--outdir",
+ str(out),
+ get_data("tests/wf/updatedir_inplace.cwl"),
+ "-r",
+ str(tmp),
+ ]
+ assert main(params) == 0
- assert os.listdir(tmp)
- assert not os.listdir(out)
+ assert os.listdir(tmp)
+ assert not os.listdir(out)
@needs_docker
-def test_write_write_conflict() -> None:
- with temp_dir("tmp") as tmp:
- tmp_name = os.path.join(tmp, "value")
+def test_write_write_conflict(tmp_path: Path) -> None:
+ tmp_name = tmp_path / "value"
- before_value, expected_value = "1", "2"
+ before_value, expected_value = "1", "2"
- with open(tmp_name, "w") as f:
- f.write(before_value)
+ with open(tmp_name, "w") as f:
+ f.write(before_value)
- assert main(["--enable-ext", get_data("tests/wf/mut.cwl"), "-a", tmp_name]) != 0
+ assert (
+ main(["--enable-ext", get_data("tests/wf/mut.cwl"), "-a", str(tmp_name)]) != 0
+ )
- with open(tmp_name, "r") as f:
- tmp_value = f.read()
+ with open(tmp_name) as f:
+ tmp_value = f.read()
- assert tmp_value == expected_value
+ assert tmp_value == expected_value
@pytest.mark.skip(reason="This test is non-deterministic")
-def test_read_write_conflict() -> None:
- with temp_dir("tmp") as tmp:
- tmp_name = os.path.join(tmp, "value")
+def test_read_write_conflict(tmp_path: Path) -> None:
+ tmp_name = tmp_path / "value"
- with open(tmp_name, "w") as f:
- f.write("1")
+ with open(tmp_name, "w") as f:
+ f.write("1")
- assert (
- main(["--enable-ext", get_data("tests/wf/mut3.cwl"), "-a", tmp_name]) != 0
- )
+ assert (
+ main(["--enable-ext", get_data("tests/wf/mut3.cwl"), "-a", str(tmp_name)]) != 0
+ )
@needs_docker
@@ -256,13 +256,13 @@ def test_require_prefix_networkaccess() -> None:
@needs_docker
-def test_require_prefix_workreuse(tmpdir: py.path.local) -> None:
+def test_require_prefix_workreuse(tmp_path: Path) -> None:
assert (
main(
[
"--enable-ext",
"--outdir",
- str(tmpdir),
+ str(tmp_path),
get_data("tests/wf/workreuse.cwl"),
]
)
@@ -272,7 +272,6 @@ def test_require_prefix_workreuse(tmpdir: py.path.local) -> None:
assert main(["--enable-ext", get_data("tests/wf/workreuse-fail.cwl")]) != 0
-@windows_needs_docker
def test_require_prefix_timelimit() -> None:
assert main(["--enable-ext", get_data("tests/wf/timelimit.cwl")]) == 0
assert main([get_data("tests/wf/timelimit.cwl")]) != 0
diff --git a/tests/test_fetch.py b/tests/test_fetch.py
index 57cb3186b5..2c06bbfc1b 100644
--- a/tests/test_fetch.py
+++ b/tests/test_fetch.py
@@ -1,6 +1,6 @@
import os
from pathlib import Path
-from typing import Any, Optional
+from typing import Any, List, Optional
from urllib.parse import urljoin, urlsplit
import pytest
@@ -12,7 +12,6 @@
from cwltool.load_tool import load_tool
from cwltool.main import main
from cwltool.resolver import resolve_local
-from cwltool.utils import onWindows
from cwltool.workflow import default_make_tool
from .util import get_data, working_directory
@@ -26,7 +25,7 @@ def __init__(
) -> None:
"""Create a Fetcher that provides a fixed result for testing purposes."""
- def fetch_text(self, url): # type: (str) -> str
+ def fetch_text(self, url: str, content_types: Optional[List[str]] = None) -> str:
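+        # The content_types parameter is accepted (and ignored) to match the
+        # newer schema-salad Fetcher.fetch_text signature.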
if url == "baz:bar/foo.cwl":
return """
cwlVersion: v1.0
@@ -82,16 +81,10 @@ def test_resolver(d: Any, a: str) -> str:
]
-def norm(uri: str) -> str:
- if onWindows():
- return uri.lower()
- return uri
-
-
@pytest.mark.parametrize("path,expected_path", path_fragments)
def test_resolve_local(path: str, expected_path: str) -> None:
with working_directory(root):
- expected = norm(root.as_uri() + expected_path)
+ expected = root.as_uri() + expected_path
resolved = resolve_local(None, path)
assert resolved
- assert norm(resolved) == expected
+ assert resolved == expected
diff --git a/tests/test_http_input.py b/tests/test_http_input.py
index 4c967f7367..ac0e3fd969 100644
--- a/tests/test_http_input.py
+++ b/tests/test_http_input.py
@@ -1,23 +1,22 @@
import os
-import tempfile
+from pathlib import Path
from typing import List
from cwltool.pathmapper import PathMapper
from cwltool.utils import CWLObjectType
-def test_http_path_mapping() -> None:
+def test_http_path_mapping(tmp_path: Path) -> None:
input_file_path = "https://raw.githubusercontent.com/common-workflow-language/cwltool/main/tests/2.fasta"
- tempdir = tempfile.mkdtemp()
- base_file = [
+ base_file: List[CWLObjectType] = [
{
"class": "File",
"location": "https://raw.githubusercontent.com/common-workflow-language/cwltool/main/tests/2.fasta",
"basename": "chr20.fa",
}
- ] # type: List[CWLObjectType]
- pathmap = PathMapper(base_file, os.getcwd(), tempdir)._pathmap
+ ]
+ pathmap = PathMapper(base_file, os.getcwd(), str(tmp_path))._pathmap
assert input_file_path in pathmap
assert os.path.exists(pathmap[input_file_path].resolved)
diff --git a/tests/test_iwdr.py b/tests/test_iwdr.py
index 84d76fa0d6..48712da230 100644
--- a/tests/test_iwdr.py
+++ b/tests/test_iwdr.py
@@ -1,24 +1,19 @@
-import os
-import tempfile
+"""InitialWorkDirRequirement related tests."""
+import json
+import re
from pathlib import Path
+from stat import S_IWGRP, S_IWOTH, S_IWRITE
+from typing import Any
+from cwltool.factory import Factory
from cwltool.main import main
-from .util import (
- get_data,
- get_main_output,
- get_windows_safe_factory,
- needs_docker,
- needs_singularity,
- temp_dir,
- windows_needs_docker,
-)
+from .util import get_data, get_main_output, needs_docker, needs_singularity
-@windows_needs_docker
def test_newline_in_entry() -> None:
"""Files in a InitialWorkingDirectory are created with a newline character."""
- factory = get_windows_safe_factory()
+ factory = Factory()
echo = factory.make(get_data("tests/wf/iwdr-entry.cwl"))
assert echo(message="hello") == {"out": "CONFIGVAR=hello\n"}
@@ -44,189 +39,306 @@ def test_directory_literal_with_real_inputs_inside(tmp_path: Path) -> None:
assert err_code == 0
+def test_bad_listing_expression(tmp_path: Path) -> None:
+ """Confirm better error message for bad listing expression."""
+ err_code, _, stderr = get_main_output(
+ [
+ "--out",
+ str(tmp_path),
+ get_data("tests/iwdr_bad_expr.cwl"),
+ "--example={}".format(get_data("tests/__init__.py")),
+ ]
+ )
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert (
+ "Expression in a 'InitialWorkdirRequirement.listing' field must return "
+ "a list containing zero or more of: File or Directory objects; Dirent "
+ "objects. Got '42' among the results" in stderr
+ )
+ assert err_code == 1
+
+
@needs_docker
-def test_iwdr_permutations() -> None:
- saved_tempdir = tempfile.tempdir
- with temp_dir() as misc:
- tempfile.tempdir = os.path.realpath(misc)
- with temp_dir() as fifth:
- with temp_dir() as sixth:
- with temp_dir() as seventh:
- with temp_dir() as eighth:
- with tempfile.NamedTemporaryFile() as first:
- with tempfile.NamedTemporaryFile() as second:
- with tempfile.NamedTemporaryFile() as third:
- with tempfile.NamedTemporaryFile() as fourth:
- with temp_dir() as outdir:
- assert (
- main(
- [
- "--outdir",
- outdir,
- "--enable-dev",
- get_data(
- "tests/wf/iwdr_permutations.cwl"
- ),
- "--first",
- first.name,
- "--second",
- second.name,
- "--third",
- third.name,
- "--fourth",
- fourth.name,
- "--fifth",
- fifth,
- "--sixth",
- sixth,
- "--seventh",
- seventh,
- "--eighth",
- eighth,
- ]
- )
- == 0
- )
- tempfile.tempdir = saved_tempdir
+def test_iwdr_permutations(tmp_path_factory: Any) -> None:
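+    """Run tests/wf/iwdr_permutations.cwl against a mix of File and Directory inputs."""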
+ misc = tmp_path_factory.mktemp("misc")
+ fifth = misc / "fifth"
+ fifth.mkdir()
+ sixth = misc / "sixth"
+ sixth.mkdir()
+ seventh = misc / "seventh"
+ seventh.mkdir()
+ eighth = misc / "eighth"
+ eighth.mkdir()
+ first = misc / "first"
+ first.touch()
+ second = misc / "second"
+ second.touch()
+ third = misc / "third"
+ third.touch()
+ fourth = misc / "fourth"
+ fourth.touch()
+ eleventh = misc / "eleventh"
+ eleventh.touch()
+ twelfth = misc / "twelfth"
+ twelfth.touch()
+ outdir = str(tmp_path_factory.mktemp("outdir"))
+ err_code, stdout, _ = get_main_output(
+ [
+ "--outdir",
+ outdir,
+ "--debug",
+ get_data("tests/wf/iwdr_permutations.cwl"),
+ "--first",
+ str(first),
+ "--second",
+ str(second),
+ "--third",
+ str(third),
+ "--fourth",
+ str(fourth),
+ "--fifth",
+ str(fifth),
+ "--sixth",
+ str(sixth),
+ "--seventh",
+ str(seventh),
+ "--eighth",
+ str(eighth),
+ "--eleventh",
+ str(eleventh),
+ "--eleventh",
+ str(twelfth),
+ ]
+ )
+ assert err_code == 0
+ log = json.loads(stdout)["log"]
+ assert log["checksum"] == "sha1$bc51ebb3f65ca44282789dd1e6de9747d8abe75f", log
+
+
+def test_iwdr_permutations_readonly(tmp_path_factory: Any) -> None:
+ """Confirm that readonly input files are properly made writable."""
+ misc = tmp_path_factory.mktemp("misc")
+ fifth = misc / "fifth"
+ fifth.mkdir()
+ sixth = misc / "sixth"
+ sixth.mkdir()
+ fifth_file = fifth / "bar"
+ fifth_dir = fifth / "foo"
+ fifth_file.touch()
+ fifth_dir.mkdir()
+ sixth = tmp_path_factory.mktemp("sixth")
+ first = misc / "first"
+ first.touch()
+ second = misc / "second"
+ second.touch()
+ outdir = str(tmp_path_factory.mktemp("outdir"))
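+    # Clear every write bit: 0o777 ^ (S_IWRITE | S_IWGRP | S_IWOTH) == 0o555,
+    # so each input starts out read-only for the run below.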
+ for entry in [first, second, fifth, sixth, fifth_file, fifth_dir]:
+ mode = entry.stat().st_mode
+ ro_mask = 0o777 ^ (S_IWRITE | S_IWGRP | S_IWOTH)
+ entry.chmod(mode & ro_mask)
+ assert (
+ main(
+ [
+ "--no-container",
+ "--debug",
+ "--leave-outputs",
+ "--outdir",
+ outdir,
+ get_data("tests/wf/iwdr_permutations_nocontainer.cwl"),
+ "--first",
+ str(first),
+ "--second",
+ str(second),
+ "--fifth",
+ str(fifth),
+ "--sixth",
+ str(sixth),
+ ]
+ )
+ == 0
+ )
+ for entry in [first, second, fifth, sixth, fifth_file, fifth_dir]:
+ try:
+ mode = entry.stat().st_mode
+ entry.chmod(mode | S_IWRITE)
+ except PermissionError:
+ pass
@needs_docker
-def test_iwdr_permutations_inplace() -> None:
- saved_tempdir = tempfile.tempdir
- with temp_dir() as misc:
- tempfile.tempdir = os.path.realpath(misc)
- with temp_dir() as fifth:
- with temp_dir() as sixth:
- with temp_dir() as seventh:
- with temp_dir() as eighth:
- with tempfile.NamedTemporaryFile() as first:
- with tempfile.NamedTemporaryFile() as second:
- with tempfile.NamedTemporaryFile() as third:
- with tempfile.NamedTemporaryFile() as fourth:
- with temp_dir() as outdir:
- assert (
- main(
- [
- "--outdir",
- outdir,
- "--enable-ext",
- "--enable-dev",
- "--overrides",
- get_data(
- "tests/wf/iwdr_permutations_inplace.yml"
- ),
- get_data(
- "tests/wf/iwdr_permutations.cwl"
- ),
- "--first",
- first.name,
- "--second",
- second.name,
- "--third",
- third.name,
- "--fourth",
- fourth.name,
- "--fifth",
- fifth,
- "--sixth",
- sixth,
- "--seventh",
- seventh,
- "--eighth",
- eighth,
- ]
- )
- == 0
- )
- tempfile.tempdir = saved_tempdir
+def test_iwdr_permutations_inplace(tmp_path_factory: Any) -> None:
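+    """IWDR permutation tests with InplaceUpdateRequirement forced via --overrides."""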
+ misc = tmp_path_factory.mktemp("misc")
+ fifth = misc / "fifth"
+ fifth.mkdir()
+ sixth = misc / "sixth"
+ sixth.mkdir()
+ seventh = misc / "seventh"
+ seventh.mkdir()
+ eighth = misc / "eighth"
+ eighth.mkdir()
+ first = misc / "first"
+ first.touch()
+ second = misc / "second"
+ second.touch()
+ third = misc / "third"
+ third.touch()
+ fourth = misc / "fourth"
+ fourth.touch()
+ eleventh = misc / "eleventh"
+ eleventh.touch()
+ twelfth = misc / "twelfth"
+ twelfth.touch()
+ outdir = str(tmp_path_factory.mktemp("outdir"))
+ err_code, stdout, _ = get_main_output(
+ [
+ "--outdir",
+ outdir,
+ "--enable-ext",
+ "--overrides",
+ get_data("tests/wf/iwdr_permutations_inplace.yml"),
+ get_data("tests/wf/iwdr_permutations.cwl"),
+ "--first",
+ str(first),
+ "--second",
+ str(second),
+ "--third",
+ str(third),
+ "--fourth",
+ str(fourth),
+ "--fifth",
+ str(fifth),
+ "--sixth",
+ str(sixth),
+ "--seventh",
+ str(seventh),
+ "--eighth",
+ str(eighth),
+ "--eleventh",
+ str(eleventh),
+ "--eleventh",
+ str(twelfth),
+ ]
+ )
+ assert err_code == 0
+ log = json.loads(stdout)["log"]
+ assert log["checksum"] == "sha1$bc51ebb3f65ca44282789dd1e6de9747d8abe75f", log
@needs_singularity
-def test_iwdr_permutations_singularity() -> None:
- with temp_dir() as fifth:
- with temp_dir() as sixth:
- with temp_dir() as seventh:
- with temp_dir() as eighth:
- with tempfile.NamedTemporaryFile() as first:
- with tempfile.NamedTemporaryFile() as second:
- with tempfile.NamedTemporaryFile() as third:
- with tempfile.NamedTemporaryFile() as fourth:
- with temp_dir() as outdir:
- assert (
- main(
- [
- "--outdir",
- outdir,
- "--enable-dev",
- "--singularity",
- get_data(
- "tests/wf/iwdr_permutations.cwl"
- ),
- "--first",
- first.name,
- "--second",
- second.name,
- "--third",
- third.name,
- "--fourth",
- fourth.name,
- "--fifth",
- fifth,
- "--sixth",
- sixth,
- "--seventh",
- seventh,
- "--eighth",
- eighth,
- ]
- )
- == 0
- )
+def test_iwdr_permutations_singularity(tmp_path_factory: Any) -> None:
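+    """IWDR permutation tests run with --singularity."""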
+ misc = tmp_path_factory.mktemp("misc")
+ fifth = misc / "fifth"
+ fifth.mkdir()
+ sixth = misc / "sixth"
+ sixth.mkdir()
+ seventh = misc / "seventh"
+ seventh.mkdir()
+ eighth = misc / "eighth"
+ eighth.mkdir()
+ first = misc / "first"
+ first.touch()
+ second = misc / "second"
+ second.touch()
+ third = misc / "third"
+ third.touch()
+ fourth = misc / "fourth"
+ fourth.touch()
+ eleventh = misc / "eleventh"
+ eleventh.touch()
+ twelfth = misc / "twelfth"
+ twelfth.touch()
+ outdir = str(tmp_path_factory.mktemp("outdir"))
+ err_code, stdout, _ = get_main_output(
+ [
+ "--outdir",
+ outdir,
+ "--debug",
+ "--singularity",
+ get_data("tests/wf/iwdr_permutations.cwl"),
+ "--first",
+ str(first),
+ "--second",
+ str(second),
+ "--third",
+ str(third),
+ "--fourth",
+ str(fourth),
+ "--fifth",
+ str(fifth),
+ "--sixth",
+ str(sixth),
+ "--seventh",
+ str(seventh),
+ "--eighth",
+ str(eighth),
+ "--eleventh",
+ str(eleventh),
+ "--eleventh",
+ str(twelfth),
+ ]
+ )
+ assert err_code == 0
+ log = json.loads(stdout)["log"]
+ assert log["checksum"] == "sha1$bc51ebb3f65ca44282789dd1e6de9747d8abe75f", log
@needs_singularity
-def test_iwdr_permutations_singularity_inplace() -> None:
- with temp_dir() as fifth:
- with temp_dir() as sixth:
- with temp_dir() as seventh:
- with temp_dir() as eighth:
- with tempfile.NamedTemporaryFile() as first:
- with tempfile.NamedTemporaryFile() as second:
- with tempfile.NamedTemporaryFile() as third:
- with tempfile.NamedTemporaryFile() as fourth:
- with temp_dir() as outdir:
- assert (
- main(
- [
- "--outdir",
- outdir,
- "--singularity",
- "--enable-ext",
- "--enable-dev",
- "--overrides",
- get_data(
- "tests/wf/iwdr_permutations_inplace.yml"
- ),
- get_data(
- "tests/wf/iwdr_permutations.cwl"
- ),
- "--first",
- first.name,
- "--second",
- second.name,
- "--third",
- third.name,
- "--fourth",
- fourth.name,
- "--fifth",
- fifth,
- "--sixth",
- sixth,
- "--seventh",
- seventh,
- "--eighth",
- eighth,
- ]
- )
- == 0
- )
+def test_iwdr_permutations_singularity_inplace(tmp_path_factory: Any) -> None:
+ """IWDR tests using --singularity and a forced InplaceUpdateRequirement."""
+ misc = tmp_path_factory.mktemp("misc")
+ fifth = misc / "fifth"
+ fifth.mkdir()
+ sixth = misc / "sixth"
+ sixth.mkdir()
+ seventh = misc / "seventh"
+ seventh.mkdir()
+ eighth = misc / "eighth"
+ eighth.mkdir()
+ first = misc / "first"
+ first.touch()
+ second = misc / "second"
+ second.touch()
+ third = misc / "third"
+ third.touch()
+ fourth = misc / "fourth"
+ fourth.touch()
+ eleventh = misc / "eleventh"
+ eleventh.touch()
+ twelfth = misc / "twelfth"
+ twelfth.touch()
+ outdir = str(tmp_path_factory.mktemp("outdir"))
+ assert (
+ main(
+ [
+ "--outdir",
+ outdir,
+ "--singularity",
+ "--enable-ext",
+ "--enable-dev",
+ "--overrides",
+ get_data("tests/wf/iwdr_permutations_inplace.yml"),
+ get_data("tests/wf/iwdr_permutations.cwl"),
+ "--first",
+ str(first),
+ "--second",
+ str(second),
+ "--third",
+ str(third),
+ "--fourth",
+ str(fourth),
+ "--fifth",
+ str(fifth),
+ "--sixth",
+ str(sixth),
+ "--seventh",
+ str(seventh),
+ "--eighth",
+ str(eighth),
+ "--eleventh",
+ str(eleventh),
+ "--eleventh",
+ str(twelfth),
+ ]
+ )
+ == 0
+ )
diff --git a/tests/test_js_sandbox.py b/tests/test_js_sandbox.py
index 21a65b468a..4efc302cd2 100644
--- a/tests/test_js_sandbox.py
+++ b/tests/test_js_sandbox.py
@@ -3,9 +3,9 @@
import pytest
from cwltool import sandboxjs
-from cwltool.utils import onWindows
+from cwltool.factory import Factory
-from .util import get_data, get_windows_safe_factory, windows_needs_docker
+from .util import get_data
node_versions = [
("v0.8.26\n", False),
@@ -24,18 +24,14 @@ def test_node_version(version: str, supported: bool, mocker: Any) -> None:
assert sandboxjs.check_js_threshold_version("node") == supported
-@windows_needs_docker
def test_value_from_two_concatenated_expressions() -> None:
- factory = get_windows_safe_factory()
+ factory = Factory()
echo = factory.make(get_data("tests/wf/vf-concat.cwl"))
file = {"class": "File", "location": get_data("tests/wf/whale.txt")}
assert echo(file1=file) == {"out": "a string\n"}
-@pytest.mark.skipif(
- onWindows(), reason="Caching processes for windows is not supported."
-)
def test_caches_js_processes(mocker: Any) -> None:
sandboxjs.exec_js_process("7", context="{}")
diff --git a/tests/test_load_tool.py b/tests/test_load_tool.py
index 0d4c7e05e4..be2f9d7c3d 100644
--- a/tests/test_load_tool.py
+++ b/tests/test_load_tool.py
@@ -9,11 +9,9 @@
from cwltool.update import INTERNAL_VERSION
from cwltool.utils import CWLObjectType
-from .test_fetch import norm
-from .util import get_data, windows_needs_docker
+from .util import get_data
-@windows_needs_docker
def test_check_version() -> None:
"""
It is permitted to load without updating, but not execute.
@@ -23,13 +21,13 @@ def test_check_version() -> None:
joborder = {"inp": "abc"} # type: CWLObjectType
loadingContext = LoadingContext({"do_update": True})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
- for j in tool.job(joborder, None, RuntimeContext()):
+ for _ in tool.job(joborder, None, RuntimeContext()):
pass
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
with pytest.raises(WorkflowException):
- for j in tool.job(joborder, None, RuntimeContext()):
+ for _ in tool.job(joborder, None, RuntimeContext()):
pass
@@ -48,17 +46,16 @@ def test_use_metadata() -> None:
def test_checklink_outputSource() -> None:
"""Is outputSource resolved correctly independent of value of do_validate."""
outsrc = (
- norm(Path(get_data("tests/wf/1st-workflow.cwl")).as_uri())
- + "#argument/classfile"
+ Path(get_data("tests/wf/1st-workflow.cwl")).as_uri() + "#argument/classfile"
)
loadingContext = LoadingContext({"do_validate": True})
tool = load_tool(get_data("tests/wf/1st-workflow.cwl"), loadingContext)
- assert norm(tool.tool["outputs"][0]["outputSource"]) == outsrc
+ assert tool.tool["outputs"][0]["outputSource"] == outsrc
loadingContext = LoadingContext({"do_validate": False})
tool = load_tool(get_data("tests/wf/1st-workflow.cwl"), loadingContext)
- assert norm(tool.tool["outputs"][0]["outputSource"]) == outsrc
+ assert tool.tool["outputs"][0]["outputSource"] == outsrc
def test_load_graph_fragment() -> None:
@@ -84,7 +81,7 @@ def test_load_graph_fragment_from_packed() -> None:
loadingContext = LoadingContext()
uri = Path(get_data("tests/wf/packed-with-loadlisting.cwl")).as_uri() + "#main"
try:
- with open(get_data("cwltool/extensions.yml"), "r") as res:
+ with open(get_data("cwltool/extensions.yml")) as res:
use_custom_schema("v1.0", "http://commonwl.org/cwltool", res.read())
# The updater transforms LoadListingRequirement from an
diff --git a/tests/test_mpi.py b/tests/test_mpi.py
index 6e7a2a6c16..2ca0823d72 100644
--- a/tests/test_mpi.py
+++ b/tests/test_mpi.py
@@ -8,9 +8,9 @@
import pkg_resources
import pytest
-from ruamel import yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from schema_salad.avro.schema import Names
+from schema_salad.utils import yaml_no_ts
import cwltool.load_tool
import cwltool.singularity
@@ -20,7 +20,7 @@
from cwltool.main import main
from cwltool.mpi import MpiConfig, MPIRequirementName
-from .util import get_data, windows_needs_docker, working_directory
+from .util import get_data, working_directory
def test_mpi_conf_defaults() -> None:
@@ -104,7 +104,8 @@ def run_many(self, n: int, args: List[str]):
"env_pass": ["USER"],
}
plat_conf_file = mpitmp / "plat_mpi.yml"
- plat_conf_file.write_text(yaml.round_trip_dump(plat_conf))
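+    # yaml_no_ts() returns a ruamel YAML instance with timestamp parsing
+    # disabled, replacing the deprecated ruamel round_trip_dump helper.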
+ yaml = yaml_no_ts()
+ yaml.dump(plat_conf, plat_conf_file)
yield str(plat_conf_file)
@@ -134,7 +135,6 @@ def test_fake_mpi_config(self, fake_mpi_conf: str) -> None:
assert conf_obj.default_nproc == 1
assert conf_obj.extra_flags == ["--no-fail"]
- @windows_needs_docker
def test_simple_mpi_tool(self, fake_mpi_conf: str, tmp_path: Path) -> None:
stdout = StringIO()
stderr = StringIO()
@@ -153,7 +153,6 @@ def test_simple_mpi_tool(self, fake_mpi_conf: str, tmp_path: Path) -> None:
pids = [int(line) for line in pidfile]
assert len(pids) == 2
- @windows_needs_docker
def test_simple_mpi_nproc_expr(self, fake_mpi_conf: str, tmp_path: Path) -> None:
np = 4
input_file = make_processes_input(np, tmp_path)
@@ -174,7 +173,6 @@ def test_simple_mpi_nproc_expr(self, fake_mpi_conf: str, tmp_path: Path) -> None
pids = [int(line) for line in pidfile]
assert len(pids) == np
- @windows_needs_docker
def test_mpi_workflow(self, fake_mpi_conf: str, tmp_path: Path) -> None:
np = 3
input_file = make_processes_input(np, tmp_path)
@@ -195,9 +193,8 @@ def test_mpi_workflow(self, fake_mpi_conf: str, tmp_path: Path) -> None:
lc = int(lc_file.read())
assert lc == np
- @windows_needs_docker
def test_environment(
- self, fake_mpi_conf: str, tmp_path: Path, monkeypatch: Any
+ self, fake_mpi_conf: str, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None:
stdout = StringIO()
stderr = StringIO()
@@ -221,13 +218,14 @@ def test_environment(
assert e["TEST_MPI_FOO"] == "bar"
-def test_env_passing(monkeypatch: Any) -> None:
+def test_env_passing(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Confirm that MPI extension passes environment variables correctly."""
config = MpiConfig(
env_pass=["A", "B", "LONG_NAME"],
env_pass_regex=["TOOLNAME", "MPI_.*_CONF"],
)
- env = {} # type: MutableMapping[str, str]
+ env: MutableMapping[str, str] = {}
with monkeypatch.context() as m:
m.setattr(os, "environ", {})
diff --git a/tests/test_pack.py b/tests/test_pack.py
index a32479f189..7fa6e796dc 100644
--- a/tests/test_pack.py
+++ b/tests/test_pack.py
@@ -4,12 +4,11 @@
from collections.abc import Sized
from functools import partial
from io import StringIO
-from tempfile import NamedTemporaryFile
-from typing import Dict
+from pathlib import Path
+from typing import Any, Dict, List, cast
-import py.path
import pytest
-from ruamel import yaml
+from schema_salad.utils import yaml_no_ts
import cwltool.pack
import cwltool.workflow
@@ -22,55 +21,46 @@
from .util import get_data, needs_docker
-def test_pack() -> None:
- loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/revsort.cwl"))
-
- with open(get_data("tests/wf/expect_packed.cwl")) as packed_file:
- expect_packed = yaml.main.safe_load(packed_file)
-
- packed = cwltool.pack.pack(loadingContext, uri)
- adjustFileObjs(
- packed, partial(make_relative, os.path.abspath(get_data("tests/wf")))
- )
- adjustDirObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf"))))
-
- assert "$schemas" in packed
- packed_schemas = packed["$schemas"]
- assert isinstance(packed_schemas, Sized)
- assert len(packed_schemas) == len(expect_packed["$schemas"])
- del packed["$schemas"]
- del expect_packed["$schemas"]
-
- assert packed == expect_packed
-
-
-def test_pack_input_named_name() -> None:
- loadingContext, workflowobj, uri = fetch_document(
- get_data("tests/wf/trick_revsort.cwl")
- )
+@pytest.mark.parametrize(
+ "unpacked,expected",
+ [
+ ("tests/wf/revsort.cwl", "tests/wf/expect_packed.cwl"),
+ (
+ "tests/wf/operation/operation-single.cwl",
+ "tests/wf/operation/expect_operation-single_packed.cwl",
+ ),
+ ("tests/wf/trick_revsort.cwl", "tests/wf/expect_trick_packed.cwl"),
+ (
+ "tests/wf/revsort_datetime.cwl",
+ "tests/wf/expect_revsort_datetime_packed.cwl",
+ ),
+ ],
+)
+def test_packing(unpacked: str, expected: str) -> None:
+ """Compare expected version reality with various workflows and --pack."""
+ loadingContext, workflowobj, uri = fetch_document(get_data(unpacked))
loadingContext.do_update = False
loadingContext, uri = resolve_and_validate_document(
loadingContext, workflowobj, uri
)
- loader = loadingContext.loader
- assert loader
- loader.resolve_ref(uri)[0]
- with open(get_data("tests/wf/expect_trick_packed.cwl")) as packed_file:
- expect_packed = yaml.main.round_trip_load(packed_file)
-
- packed = cwltool.pack.pack(loadingContext, uri)
- adjustFileObjs(
- packed, partial(make_relative, os.path.abspath(get_data("tests/wf")))
- )
- adjustDirObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf"))))
-
- assert "$schemas" in packed
- packed_schemas = packed["$schemas"]
- assert isinstance(packed_schemas, Sized)
- assert len(packed_schemas) == len(expect_packed["$schemas"])
- del packed["$schemas"]
- del expect_packed["$schemas"]
+ packed = json.loads(print_pack(loadingContext, uri))
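+    # print_pack wraps multi-process documents in $graph; unwrap single-entry
+    # graphs so single-tool results compare against their flat expected form.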
+ if len(cast(List[Any], packed["$graph"])) == 1:
+ packed = cast(List[Any], packed["$graph"])[0]
+ context_dir = os.path.abspath(os.path.dirname(get_data(unpacked)))
+ adjustFileObjs(packed, partial(make_relative, context_dir))
+ adjustDirObjs(packed, partial(make_relative, context_dir))
+
+ with open(get_data(expected)) as packed_file:
+ expect_packed = json.load(packed_file)
+
+ if "$schemas" in expect_packed:
+ assert "$schemas" in packed
+ packed_schemas = packed["$schemas"]
+ assert isinstance(packed_schemas, Sized)
+ assert len(packed_schemas) == len(expect_packed["$schemas"])
+ del packed["$schemas"]
+ del expect_packed["$schemas"]
assert packed == expect_packed
@@ -92,8 +82,10 @@ def test_pack_single_tool() -> None:
def test_pack_fragment() -> None:
+ yaml = yaml_no_ts()
+
with open(get_data("tests/wf/scatter2_subwf.cwl")) as packed_file:
- expect_packed = yaml.main.safe_load(packed_file)
+ expect_packed = yaml.load(packed_file)
loadingContext, workflowobj, uri = fetch_document(get_data("tests/wf/scatter2.cwl"))
packed = cwltool.pack.pack(loadingContext, uri + "#scatterstep/mysub")
@@ -102,9 +94,10 @@ def test_pack_fragment() -> None:
)
adjustDirObjs(packed, partial(make_relative, os.path.abspath(get_data("tests/wf"))))
- assert json.dumps(packed, sort_keys=True, indent=2) == json.dumps(
- expect_packed, sort_keys=True, indent=2
- )
+ packed_result = json.dumps(packed, sort_keys=True, indent=2)
+ expected = json.dumps(expect_packed, sort_keys=True, indent=2)
+
+ assert packed_result == expected
def test_pack_rewrites() -> None:
@@ -155,17 +148,17 @@ def test_pack_missing_cwlVersion(cwl_path: str) -> None:
assert packed["cwlVersion"] == "v1.0"
-def test_pack_idempotence_tool() -> None:
+def test_pack_idempotence_tool(tmp_path: Path) -> None:
"""Ensure that pack produces exactly the same document for an already packed CommandLineTool."""
- _pack_idempotently("tests/wf/hello_single_tool.cwl")
+ _pack_idempotently("tests/wf/hello_single_tool.cwl", tmp_path)
-def test_pack_idempotence_workflow() -> None:
+def test_pack_idempotence_workflow(tmp_path: Path) -> None:
"""Ensure that pack produces exactly the same document for an already packed workflow."""
- _pack_idempotently("tests/wf/count-lines1-wf.cwl")
+ _pack_idempotently("tests/wf/count-lines1-wf.cwl", tmp_path)
-def _pack_idempotently(document: str) -> None:
+def _pack_idempotently(document: str, tmp_path: Path) -> None:
loadingContext, workflowobj, uri = fetch_document(get_data(document))
loadingContext.do_update = False
loadingContext, uri = resolve_and_validate_document(
@@ -179,26 +172,24 @@ def _pack_idempotently(document: str) -> None:
packed_text = print_pack(loadingContext, uri)
packed = json.loads(packed_text)
- tmp = NamedTemporaryFile(mode="w", delete=False)
- try:
- tmp.write(packed_text)
- tmp.flush()
- tmp.close()
-
- loadingContext, workflowobj, uri2 = fetch_document(tmp.name)
- loadingContext.do_update = False
- loadingContext, uri2 = resolve_and_validate_document(
- loadingContext, workflowobj, uri2
- )
- loader2 = loadingContext.loader
- assert loader2
- loader2.resolve_ref(uri2)[0]
-
- # generate pack output dict
- packed_text = print_pack(loadingContext, uri2)
- double_packed = json.loads(packed_text)
- finally:
- os.remove(tmp.name)
+ tmp_name = tmp_path / "packed.cwl"
+    with tmp_name.open(mode="w") as tmp:
+        tmp.write(packed_text)
+
+ loadingContext, workflowobj, uri2 = fetch_document(tmp.name)
+ loadingContext.do_update = False
+ loadingContext, uri2 = resolve_and_validate_document(
+ loadingContext, workflowobj, uri2
+ )
+ loader2 = loadingContext.loader
+ assert loader2
+ loader2.resolve_ref(uri2)[0]
+
+ # generate pack output dict
+ packed_text = print_pack(loadingContext, uri2)
+ double_packed = json.loads(packed_text)
assert uri != uri2
assert packed == double_packed
@@ -213,7 +204,7 @@ def _pack_idempotently(document: str) -> None:
@needs_docker
@pytest.mark.parametrize("wf_path,job_path,namespaced", cwl_to_run)
def test_packed_workflow_execution(
- wf_path: str, job_path: str, namespaced: bool, tmpdir: py.path.local
+ wf_path: str, job_path: str, namespaced: bool, tmp_path: Path
) -> None:
loadingContext = LoadingContext()
loadingContext.resolver = tool_resolver
@@ -236,10 +227,10 @@ def test_packed_workflow_execution(
normal_output = StringIO()
packed_output = StringIO()
- normal_params = ["--outdir", str(tmpdir), get_data(wf_path), get_data(job_path)]
+ normal_params = ["--outdir", str(tmp_path), get_data(wf_path), get_data(job_path)]
packed_params = [
"--outdir",
- str(tmpdir),
+ str(tmp_path),
"--debug",
wf_packed_path,
get_data(job_path),
diff --git a/tests/test_parallel.py b/tests/test_parallel.py
index 2ff52c3c56..e989e4f0a9 100644
--- a/tests/test_parallel.py
+++ b/tests/test_parallel.py
@@ -1,33 +1,29 @@
import json
-
-import py.path
+from pathlib import Path
from cwltool.context import RuntimeContext
from cwltool.executors import MultithreadedJobExecutor
+from cwltool.factory import Factory
-from .util import get_data, get_windows_safe_factory, windows_needs_docker
+from .util import get_data
-@windows_needs_docker
-def test_sequential_workflow(tmpdir: py.path.local) -> None:
+def test_sequential_workflow(tmp_path: Path) -> None:
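+    """Confirm that a sequential workflow runs under the multithreaded executor."""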
test_file = "tests/wf/count-lines1-wf.cwl"
executor = MultithreadedJobExecutor()
runtime_context = RuntimeContext()
- runtime_context.outdir = str(tmpdir)
+ runtime_context.outdir = str(tmp_path)
runtime_context.select_resources = executor.select_resources
- factory = get_windows_safe_factory(
- executor=executor, runtime_context=runtime_context
- )
+ factory = Factory(executor, None, runtime_context)
echo = factory.make(get_data(test_file))
file_contents = {"class": "File", "location": get_data("tests/wf/whale.txt")}
assert echo(file1=file_contents) == {"count_output": 16}
-@windows_needs_docker
def test_scattered_workflow() -> None:
test_file = "tests/wf/scatter-wf4.cwl"
job_file = "tests/wf/scatter-job2.json"
- factory = get_windows_safe_factory(executor=MultithreadedJobExecutor())
+ factory = Factory(MultithreadedJobExecutor())
echo = factory.make(get_data(test_file))
with open(get_data(job_file)) as job:
assert echo(**json.load(job)) == {"out": ["foo one three", "foo two four"]}
diff --git a/tests/test_path_checks.py b/tests/test_path_checks.py
new file mode 100644
index 0000000000..e8a300fed5
--- /dev/null
+++ b/tests/test_path_checks.py
@@ -0,0 +1,97 @@
+from pathlib import Path
+
+import pytest
+
+from cwltool.main import main
+
+from .util import needs_docker
+
+script = """
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+ - id: input
+ type: File
+ inputBinding:
+ position: 0
+ - id: output
+ type: string
+outputs:
+ - id: output
+ type: File
+ outputBinding:
+ glob: "$(inputs.output)"
+stdout: "$(inputs.output)"
+baseCommand: [cat]
+"""
+
+
+@needs_docker
+def test_spaces_in_input_files(tmp_path: Path) -> None:
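+    """Confirm that filenames with spaces fail without --relax-path-checks."""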
+ script_name = tmp_path / "script"
+ spaces = tmp_path / "test with spaces"
+ spaces.touch()
+ with script_name.open(mode="w") as script_file:
+ script_file.write(script)
+
+ params = [
+ "--debug",
+ "--outdir",
+ str(tmp_path / "outdir"),
+ str(script_name),
+ "--input",
+ str(spaces),
+ "--output",
+ "test.txt",
+ ]
+ assert main(params) == 1
+ assert main(["--relax-path-checks"] + params) == 0
+
+
+@needs_docker
+@pytest.mark.parametrize(
+ "filename", ["測試", "그래프", "график", "𒁃", "☕😍", "امتحان", "abc+DEFGZ.z_12345-"]
+)
+def test_unicode_in_input_files(tmp_path: Path, filename: str) -> None:
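+    """Confirm that non-ASCII characters in input filenames are accepted."""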
+ script_name = tmp_path / "script"
+ inputfile = tmp_path / filename
+ inputfile.touch()
+ with script_name.open(mode="w") as script_file:
+ script_file.write(script)
+
+ params = [
+ "--debug",
+ "--outdir",
+ str(tmp_path / "outdir"),
+ str(script_name),
+ "--input",
+ str(inputfile),
+ "--output",
+ "test.txt",
+ ]
+ assert main(params) == 0
+
+
+@needs_docker
+@pytest.mark.parametrize(
+ "filename", ["測試", "그래프", "график", "𒁃", "☕😍", "امتحان", "abc+DEFGZ.z_12345-"]
+)
+def test_unicode_in_output_files(tmp_path: Path, filename: str) -> None:
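+    """Confirm that non-ASCII characters in output filenames are accepted."""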
+ script_name = tmp_path / "script"
+ inputfile = tmp_path / "test"
+ inputfile.touch()
+ with script_name.open(mode="w") as script_file:
+ script_file.write(script)
+
+ params = [
+ "--debug",
+ "--outdir",
+ str(tmp_path / "outdir"),
+ str(script_name),
+ "--input",
+ str(inputfile),
+ "--output",
+ filename,
+ ]
+ assert main(params) == 0
diff --git a/tests/test_pathmapper.py b/tests/test_pathmapper.py
index b957d55dfe..fae1cb5d84 100644
--- a/tests/test_pathmapper.py
+++ b/tests/test_pathmapper.py
@@ -15,7 +15,7 @@ def __init__(
stagedir: str,
new: str,
):
- super(SubPathMapper, self).__init__(referenced_files, basedir, stagedir)
+ super().__init__(referenced_files, basedir, stagedir)
self.new = new
pathmap = SubPathMapper([], "", "", "new")
diff --git a/tests/test_procgenerator.py b/tests/test_procgenerator.py
index 2c0d47a86c..39cd099146 100644
--- a/tests/test_procgenerator.py
+++ b/tests/test_procgenerator.py
@@ -1,37 +1,35 @@
-import os
+"""ProcessGenerator related tests."""
+
+
+import pytest
from cwltool.main import main
-from .util import get_data, windows_needs_docker
-
-
-@windows_needs_docker
-def test_missing_enable_ext() -> None:
- # Requires --enable-ext and --enable-dev
- try:
- opt = os.environ.get("CWLTOOL_OPTIONS")
-
- if "CWLTOOL_OPTIONS" in os.environ:
- del os.environ["CWLTOOL_OPTIONS"]
- assert main([get_data("tests/wf/generator/zing.cwl"), "--zing", "zipper"]) == 1
-
- assert (
- main(
- [
- "--enable-ext",
- "--enable-dev",
- get_data("tests/wf/generator/zing.cwl"),
- "--zing",
- "zipper",
- ]
- )
- == 0
+from .util import get_data
+
+
+def test_missing_enable_ext(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Test missing enable-ext option fails.
+
+ Check that a workflow that needs `--enable-ext` and
+ `--enable-dev` fails without those options and passes with them.
+ """
+ monkeypatch.delenv("CWLTOOL_OPTIONS", raising=False)
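+    # raising=False makes delenv a no-op when CWLTOOL_OPTIONS is already unset.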
+ assert main([get_data("tests/wf/generator/zing.cwl"), "--zing", "zipper"]) == 1
+
+ assert (
+ main(
+ [
+ "--debug",
+ "--enable-ext",
+ "--enable-dev",
+ get_data("tests/wf/generator/zing.cwl"),
+ "--zing",
+ "zipper",
+ ]
)
+ == 0
+ )
- os.environ["CWLTOOL_OPTIONS"] = "--enable-ext --enable-dev"
- assert main([get_data("tests/wf/generator/zing.cwl"), "--zing", "zipper"]) == 0
- finally:
- if opt is not None:
- os.environ["CWLTOOL_OPTIONS"] = opt
- elif "CWLTOOL_OPTIONS" in os.environ:
- del os.environ["CWLTOOL_OPTIONS"]
+ monkeypatch.setenv("CWLTOOL_OPTIONS", "--enable-ext --enable-dev")
+ assert main([get_data("tests/wf/generator/zing.cwl"), "--zing", "zipper"]) == 0
diff --git a/tests/test_provenance.py b/tests/test_provenance.py
index 0f7c3287ab..3d2d5d8082 100644
--- a/tests/test_provenance.py
+++ b/tests/test_provenance.py
@@ -1,28 +1,24 @@
import json
import os
import pickle
-import shutil
import sys
-import tempfile
import urllib
from pathlib import Path
-from typing import Any, Generator, cast
+from typing import Any, Generator
import arcp
import bagit
-import py.path
import pytest
from rdflib import Graph, Namespace, URIRef
from rdflib.namespace import DC, DCTERMS, RDF
from rdflib.term import Literal
-# Module to be tested
-from cwltool import provenance
+from cwltool import provenance, provenance_constants
from cwltool.main import main
from cwltool.provenance import ResearchObject
from cwltool.stdfsaccess import StdFsAccess
-from .util import get_data, needs_docker, temp_dir, working_directory
+from .util import get_data, needs_docker, working_directory
# RDF namespaces we'll query for later
ORE = Namespace("http://www.openarchives.org/ore/terms/")
@@ -35,63 +31,65 @@
OA = Namespace("http://www.w3.org/ns/oa#")
-@pytest.fixture
-def folder(tmpdir: py.path.local) -> Generator[str, None, None]:
- directory = str(tmpdir)
- yield directory
-
- if not os.environ.get("DEBUG"):
- shutil.rmtree(directory)
-
-
-def cwltool(folder: str, *args: Any) -> None:
- new_args = ["--provenance", folder]
+def cwltool(tmp_path: Path, *args: Any) -> Path:
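+    """Run cwltool --provenance with the given args and return the provenance folder."""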
+ prov_folder = tmp_path / "provenance"
+ prov_folder.mkdir()
+ new_args = ["--provenance", str(prov_folder)]
new_args.extend(args)
# Run within a temporary directory to not pollute git checkout
- with temp_dir("cwltool-run") as tmp_dir:
- with working_directory(tmp_dir):
- status = main(new_args)
- assert status == 0, "Failed: cwltool.main(%r)" % (args)
+ tmp_dir = tmp_path / "cwltool-run"
+ tmp_dir.mkdir()
+ with working_directory(tmp_dir):
+ status = main(new_args)
+        assert status == 0, "Failed: cwltool.main(%r)" % (args,)
+ return prov_folder
@needs_docker
-def test_hello_workflow(folder: str) -> None:
- cwltool(
- folder,
- get_data("tests/wf/hello-workflow.cwl"),
- "--usermessage",
- "Hello workflow",
+def test_hello_workflow(tmp_path: Path) -> None:
+ check_provenance(
+ cwltool(
+ tmp_path,
+ get_data("tests/wf/hello-workflow.cwl"),
+ "--usermessage",
+ "Hello workflow",
+ )
)
- check_provenance(folder)
@needs_docker
-def test_hello_single_tool(folder: str) -> None:
- cwltool(
- folder, get_data("tests/wf/hello_single_tool.cwl"), "--message", "Hello tool"
+def test_hello_single_tool(tmp_path: Path) -> None:
+ check_provenance(
+ cwltool(
+ tmp_path,
+ get_data("tests/wf/hello_single_tool.cwl"),
+ "--message",
+ "Hello tool",
+ ),
+ single_tool=True,
)
- check_provenance(folder, single_tool=True)
@needs_docker
-def test_revsort_workflow(folder: str) -> None:
- cwltool(
- folder, get_data("tests/wf/revsort.cwl"), get_data("tests/wf/revsort-job.json")
+def test_revsort_workflow(tmp_path: Path) -> None:
+ folder = cwltool(
+ tmp_path,
+ get_data("tests/wf/revsort.cwl"),
+ get_data("tests/wf/revsort-job.json"),
)
check_output_object(folder)
check_provenance(folder)
@needs_docker
-def test_nested_workflow(folder: str) -> None:
- cwltool(folder, get_data("tests/wf/nested.cwl"))
- check_provenance(folder, nested=True)
+def test_nested_workflow(tmp_path: Path) -> None:
+ check_provenance(cwltool(tmp_path, get_data("tests/wf/nested.cwl")), nested=True)
@needs_docker
-def test_secondary_files_implicit(folder: str, tmpdir: py.path.local) -> None:
- file1 = tmpdir.join("foo1.txt")
- file1idx = tmpdir.join("foo1.txt.idx")
+def test_secondary_files_implicit(tmp_path: Path) -> None:
+ file1 = tmp_path / "foo1.txt"
+ file1idx = tmp_path / "foo1.txt.idx"
with open(str(file1), "w", encoding="ascii") as f:
f.write("foo")
@@ -99,18 +97,20 @@ def test_secondary_files_implicit(folder: str, tmpdir: py.path.local) -> None:
f.write("bar")
# secondary will be picked up by .idx
- cwltool(folder, get_data("tests/wf/sec-wf.cwl"), "--file1", str(file1))
+ folder = cwltool(tmp_path, get_data("tests/wf/sec-wf.cwl"), "--file1", str(file1))
check_provenance(folder, secondary_files=True)
check_secondary_files(folder)
@needs_docker
-def test_secondary_files_explicit(folder: str, tmpdir: py.path.local) -> None:
- orig_tempdir = tempfile.tempdir
- tempfile.tempdir = str(tmpdir)
+def test_secondary_files_explicit(tmp_path: Path) -> None:
# Deliberately do NOT have common basename or extension
- file1 = tempfile.mktemp("foo")
- file1idx = tempfile.mktemp("bar")
+ file1dir = tmp_path / "foo"
+ file1dir.mkdir()
+ file1 = file1dir / "foo"
+ file1idxdir = tmp_path / "bar"
+ file1idxdir.mkdir()
+ file1idx = file1idxdir / "bar"
with open(file1, "w", encoding="ascii") as f:
f.write("foo")
@@ -121,41 +121,41 @@ def test_secondary_files_explicit(folder: str, tmpdir: py.path.local) -> None:
job = {
"file1": {
"class": "File",
- "path": file1,
+ "path": str(file1),
"basename": "foo1.txt",
"secondaryFiles": [
{
"class": "File",
- "path": file1idx,
+ "path": str(file1idx),
"basename": "foo1.txt.idx",
}
],
}
}
- jobJson = tempfile.mktemp("job.json")
+
+ jobJson = tmp_path / "job.json"
with open(jobJson, "wb") as fp:
j = json.dumps(job, ensure_ascii=True)
fp.write(j.encode("ascii"))
- cwltool(folder, get_data("tests/wf/sec-wf.cwl"), jobJson)
+ folder = cwltool(tmp_path, get_data("tests/wf/sec-wf.cwl"), str(jobJson))
check_provenance(folder, secondary_files=True)
check_secondary_files(folder)
- tempfile.tempdir = orig_tempdir
@needs_docker
-def test_secondary_files_output(folder: str) -> None:
+def test_secondary_files_output(tmp_path: Path) -> None:
# secondary will be picked up by .idx
- cwltool(folder, get_data("tests/wf/sec-wf-out.cwl"))
+ folder = cwltool(tmp_path, get_data("tests/wf/sec-wf-out.cwl"))
check_provenance(folder, secondary_files=True)
# Skipped, not the same secondary files as above
# self.check_secondary_files()
@needs_docker
-def test_directory_workflow(folder: str, tmpdir: py.path.local) -> None:
- dir2 = tmpdir.join("dir2")
- os.makedirs(str(dir2))
+def test_directory_workflow(tmp_path: Path) -> None:
+ dir2 = tmp_path / "dir2"
+ dir2.mkdir()
sha1 = {
# Expected hashes of ASCII letters (no linefeed)
# as returned from:
@@ -166,34 +166,34 @@ def test_directory_workflow(folder: str, tmpdir: py.path.local) -> None:
}
for x in "abc":
# Make test files with predictable hashes
- with open(str(dir2.join(x)), "w", encoding="ascii") as f:
+ with open(dir2 / x, "w", encoding="ascii") as f:
f.write(x)
- cwltool(folder, get_data("tests/wf/directory.cwl"), "--dir", str(dir2))
+ folder = cwltool(tmp_path, get_data("tests/wf/directory.cwl"), "--dir", str(dir2))
check_provenance(folder, directory=True)
# Output should include ls stdout of filenames a b c on each line
- file_list = os.path.join(
- folder,
- "data",
+ file_list = (
+ folder
+ / "data"
+ / "3c"
+ / "3ca69e8d6c234a469d16ac28a4a658c92267c423"
# checksum as returned from:
# echo -e "a\nb\nc" | sha1sum
# 3ca69e8d6c234a469d16ac28a4a658c92267c423 -
- "3c",
- "3ca69e8d6c234a469d16ac28a4a658c92267c423",
)
- assert os.path.isfile(file_list)
+ assert file_list.is_file()
# Input files should be captured by hash value,
# even if they were inside a class: Directory
for (l, l_hash) in sha1.items():
prefix = l_hash[:2] # first 2 letters
- p = os.path.join(folder, "data", prefix, l_hash)
- assert os.path.isfile(p), "Could not find %s as %s" % (l, p)
+ p = folder / "data" / prefix / l_hash
+ assert p.is_file(), f"Could not find {l} as {p}"
-def check_output_object(base_path: str) -> None:
- output_obj = os.path.join(base_path, "workflow", "primary-output.json")
+def check_output_object(base_path: Path) -> None:
+ output_obj = base_path / "workflow" / "primary-output.json"
compare_checksum = "sha1$b9214658cc453331b62c2282b772a5c063dbd284"
compare_location = "../data/b9/b9214658cc453331b62c2282b772a5c063dbd284"
with open(output_obj) as fp:
@@ -203,23 +203,21 @@ def check_output_object(base_path: str) -> None:
assert f1["location"] == compare_location
-def check_secondary_files(base_path: str) -> None:
- foo_data = os.path.join(
- base_path,
- "data",
+def check_secondary_files(base_path: Path) -> None:
+ foo_data = (
+ base_path
+ / "data"
+ / "0b"
+ / "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"
# checksum as returned from:
# $ echo -n foo | sha1sum
# 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 -
- "0b",
- "0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33",
- )
- bar_data = os.path.join(
- base_path, "data", "62", "62cdb7020ff920e5aa642c3d4066950dd1f01f4d"
)
- assert os.path.isfile(foo_data), "Did not capture file.txt 'foo'"
- assert os.path.isfile(bar_data), "Did not capture secondary file.txt.idx 'bar"
+ bar_data = base_path / "data" / "62" / "62cdb7020ff920e5aa642c3d4066950dd1f01f4d"
+ assert foo_data.is_file(), "Did not capture file.txt 'foo'"
+ assert bar_data.is_file(), "Did not capture secondary file.txt.idx 'bar'"
- primary_job = os.path.join(base_path, "workflow", "primary-job.json")
+ primary_job = base_path / "workflow" / "primary-job.json"
with open(primary_job) as fp:
job_json = json.load(fp)
# TODO: Verify secondaryFile in primary-job.json
@@ -235,7 +233,7 @@ def check_secondary_files(base_path: str) -> None:
def check_provenance(
- base_path: str,
+ base_path: Path,
nested: bool = False,
single_tool: bool = False,
directory: bool = False,
@@ -253,7 +251,7 @@ def check_provenance(
)
-def check_folders(base_path: str) -> None:
+def check_folders(base_path: Path) -> None:
required_folders = [
"data",
"snapshot",
@@ -263,10 +261,10 @@ def check_folders(base_path: str) -> None:
]
for folder in required_folders:
- assert os.path.isdir(os.path.join(base_path, folder))
+ assert (base_path / folder).is_dir()
-def check_bagit(base_path: str) -> None:
+def check_bagit(base_path: Path) -> None:
# check bagit structure
required_files = [
"bagit.txt",
@@ -277,10 +275,9 @@ def check_bagit(base_path: str) -> None:
]
for basename in required_files:
- file_path = os.path.join(base_path, basename)
- assert os.path.isfile(file_path)
+ assert (base_path / basename).is_file()
- bag = bagit.Bag(base_path)
+ bag = bagit.Bag(str(base_path))
assert bag.has_oxum()
(only_manifest, only_fs) = bag.compare_manifests_with_fs()
assert not list(only_manifest), "Some files only in manifest"
@@ -292,16 +289,16 @@ def check_bagit(base_path: str) -> None:
assert arcp.is_arcp_uri(bag.info.get("External-Identifier"))
-def find_arcp(base_path: str) -> str:
+def find_arcp(base_path: Path) -> str:
# First try to find External-Identifier
- bag = bagit.Bag(base_path)
+ bag = bagit.Bag(str(base_path))
ext_id = bag.info.get("External-Identifier")
if arcp.is_arcp_uri(ext_id):
- return cast(str, ext_id)
+ return str(ext_id)
raise Exception("Can't find External-Identifier")
-def _arcp2file(base_path: str, uri: str) -> str:
+def _arcp2file(base_path: Path, uri: str) -> Path:
parsed = arcp.parse_arcp(uri)
# arcp URIs, ensure they are local to our RO
assert (
@@ -309,14 +306,12 @@ def _arcp2file(base_path: str, uri: str) -> str:
), "arcp URI must be local to the research object"
path = parsed.path[1:] # Strip first /
- # Convert to local path, in case it uses \ on Windows
- lpath = str(Path(path))
- return os.path.join(base_path, lpath)
+ return base_path / Path(path)
-def check_ro(base_path: str, nested: bool = False) -> None:
- manifest_file = os.path.join(base_path, "metadata", "manifest.json")
- assert os.path.isfile(manifest_file), "Can't find " + manifest_file
+def check_ro(base_path: Path, nested: bool = False) -> None:
+ manifest_file = base_path / "metadata" / "manifest.json"
+ assert manifest_file.is_file(), f"Can't find {manifest_file}"
arcp_root = find_arcp(base_path)
base = urllib.parse.urljoin(arcp_root, "metadata/manifest.json")
g = Graph()
@@ -324,7 +319,7 @@ def check_ro(base_path: str, nested: bool = False) -> None:
# Avoid resolving JSON-LD context https://w3id.org/bundle/context
# so this test works offline
context = Path(get_data("tests/bundle-context.jsonld")).as_uri()
- with open(manifest_file, "r", encoding="UTF-8") as fh:
+ with open(manifest_file, encoding="UTF-8") as fh:
jsonld = fh.read()
# replace with file:/// URI
jsonld = jsonld.replace("https://w3id.org/bundle/context", context)
@@ -332,24 +327,24 @@ def check_ro(base_path: str, nested: bool = False) -> None:
if os.environ.get("DEBUG"):
print("Parsed manifest:\n\n")
g.serialize(sys.stdout, format="ttl")
- ro = None
+ _ro = None
- for ro in g.subjects(ORE.isDescribedBy, URIRef(base)):
+ for _ro in g.subjects(ORE.isDescribedBy, URIRef(base)):
break
- assert ro is not None, "Can't find RO with ore:isDescribedBy"
+ assert _ro is not None, "Can't find RO with ore:isDescribedBy"
profile = None
- for dc in g.objects(ro, DCTERMS.conformsTo):
+ for dc in g.objects(_ro, DCTERMS.conformsTo):
profile = dc
break
assert profile is not None, "Can't find profile with dct:conformsTo"
- assert profile == URIRef(provenance.CWLPROV_VERSION), (
+ assert profile == URIRef(provenance_constants.CWLPROV_VERSION), (
"Unexpected cwlprov version " + profile
)
paths = []
externals = []
- for aggregate in g.objects(ro, ORE.aggregates):
+ for aggregate in g.objects(_ro, ORE.aggregates):
if not arcp.is_arcp_uri(aggregate):
externals.append(aggregate)
# Won't check external URIs existence here
@@ -357,7 +352,7 @@ def check_ro(base_path: str, nested: bool = False) -> None:
continue
lfile = _arcp2file(base_path, aggregate)
paths.append(os.path.relpath(lfile, base_path))
- assert os.path.isfile(lfile), "Can't find aggregated " + lfile
+ assert os.path.isfile(lfile), f"Can't find aggregated {lfile}"
assert paths, "Didn't find any arcp aggregates"
assert externals, "Didn't find any data URIs"
@@ -395,10 +390,10 @@ def check_ro(base_path: str, nested: bool = False) -> None:
assert (d, OA.hasTarget, URIRef(uuid.urn)) in g
linked = set(g.subjects(OA.motivatedBy, OA.linking))
- for l in linked:
- assert (l, OA.hasBody, URIRef(packed)) in g
- assert (l, OA.hasBody, URIRef(primary_job)) in g
- assert (l, OA.hasTarget, URIRef(uuid.urn)) in g
+ for link in linked:
+ assert (link, OA.hasBody, URIRef(packed)) in g
+ assert (link, OA.hasBody, URIRef(primary_job)) in g
+ assert (link, OA.hasTarget, URIRef(uuid.urn)) in g
has_provenance = set(g.subjects(OA.hasBody, URIRef(primary_prov_nt)))
for p in has_provenance:
@@ -407,11 +402,15 @@ def check_ro(base_path: str, nested: bool = False) -> None:
# Check all prov elements are listed
formats = set()
for prov in g.objects(p, OA.hasBody):
- assert (prov, DCTERMS.conformsTo, URIRef(provenance.CWLPROV_VERSION)) in g
+ assert (
+ prov,
+ DCTERMS.conformsTo,
+ URIRef(provenance_constants.CWLPROV_VERSION),
+ ) in g
# NOTE: DC.format is a Namespace method and does not resolve like other terms
formats.update(set(g.objects(prov, DC["format"])))
assert formats, "Could not find media types"
- expected = set(
+ expected = {
Literal(f)
for f in (
"application/json",
@@ -421,7 +420,7 @@ def check_ro(base_path: str, nested: bool = False) -> None:
'text/turtle; charset="UTF-8"',
"application/xml",
)
- )
+ }
assert formats == expected, "Did not match expected PROV media types"
if nested:
@@ -436,14 +435,14 @@ def check_ro(base_path: str, nested: bool = False) -> None:
def check_prov(
- base_path: str,
+ base_path: Path,
nested: bool = False,
single_tool: bool = False,
directory: bool = False,
secondary_files: bool = False,
) -> None:
- prov_file = os.path.join(base_path, "metadata", "provenance", "primary.cwlprov.nt")
- assert os.path.isfile(prov_file), "Can't find " + prov_file
+ prov_file = base_path / "metadata" / "provenance" / "primary.cwlprov.nt"
+ assert prov_file.is_file(), f"Can't find {prov_file}"
arcp_root = find_arcp(base_path)
# Note: We don't need to include metadata/provenance in the base URI
# as .nt always use absolute URIs
@@ -458,7 +457,7 @@ def check_prov(
# main workflow run URI (as urn:uuid:) should correspond to arcp uuid part
uuid = arcp.parse_arcp(arcp_root).uuid
main_run = URIRef(uuid.urn)
- assert main_run in runs, "Can't find run %s in %s" % (main_run, runs)
+ assert main_run in runs, f"Can't find run {main_run} in {runs}"
# TODO: we should not need to parse arcp, but follow
# the has_provenance annotations in manifest.json instead
@@ -572,7 +571,7 @@ def check_prov(
sec_basename = set(g.objects(sec, CWLPROV.basename)).pop()
sec_nameroot = set(g.objects(sec, CWLPROV.nameroot)).pop()
sec_nameext = set(g.objects(sec, CWLPROV.nameext)).pop()
- assert str(sec_basename) == "%s%s" % (sec_nameroot, sec_nameext)
+ assert str(sec_basename) == f"{sec_nameroot}{sec_nameext}"
# TODO: Check hash data file exist in RO
# The primary entity should have the same, but different values
@@ -580,7 +579,7 @@ def check_prov(
prim_basename = set(g.objects(prim, CWLPROV.basename)).pop()
prim_nameroot = set(g.objects(prim, CWLPROV.nameroot)).pop()
prim_nameext = set(g.objects(prim, CWLPROV.nameext)).pop()
- assert str(prim_basename) == "%s%s" % (prim_nameroot, prim_nameext)
+ assert str(prim_basename) == f"{prim_nameroot}{prim_nameext}"
@pytest.fixture
@@ -604,12 +603,11 @@ def test_writable_string(research_object: ResearchObject) -> None:
with research_object.write_bag_file("file.txt") as fh:
assert fh.writable()
fh.write("Hello\n")
- # TODO: Check Windows does not modify \n to \r\n here
sha1 = os.path.join(research_object.folder, "tagmanifest-sha1.txt")
assert os.path.isfile(sha1)
- with open(sha1, "r", encoding="UTF-8") as sha_file:
+ with open(sha1, encoding="UTF-8") as sha_file:
stripped_sha = sha_file.readline().strip()
assert stripped_sha.endswith("file.txt")
# stain@biggie:~/src/cwltool$ echo Hello | sha1sum
@@ -619,7 +617,7 @@ def test_writable_string(research_object: ResearchObject) -> None:
sha256 = os.path.join(research_object.folder, "tagmanifest-sha256.txt")
assert os.path.isfile(sha256)
- with open(sha256, "r", encoding="UTF-8") as sha_file:
+ with open(sha256, encoding="UTF-8") as sha_file:
stripped_sha = sha_file.readline().strip()
assert stripped_sha.endswith("file.txt")
@@ -640,7 +638,7 @@ def test_writable_unicode_string(research_object: ResearchObject) -> None:
def test_writable_bytes(research_object: ResearchObject) -> None:
- string = "Here is a snowman: \u2603 \n".encode("UTF-8")
+ string = "Here is a snowman: \u2603 \n".encode()
with research_object.write_bag_file("file.txt", encoding=None) as fh:
fh.write(string) # type: ignore
@@ -649,13 +647,12 @@ def test_data(research_object: ResearchObject) -> None:
with research_object.write_bag_file("data/file.txt") as fh:
assert fh.writable()
fh.write("Hello\n")
- # TODO: Check Windows does not modify \n to \r\n here
# Because this is under data/ it should add to manifest
# rather than tagmanifest
sha1 = os.path.join(research_object.folder, "manifest-sha1.txt")
assert os.path.isfile(sha1)
- with open(sha1, "r", encoding="UTF-8") as fh2:
+ with open(sha1, encoding="UTF-8") as fh2:
stripped_sha = fh2.readline().strip()
assert stripped_sha.endswith("data/file.txt")
@@ -663,14 +660,14 @@ def test_data(research_object: ResearchObject) -> None:
def test_not_seekable(research_object: ResearchObject) -> None:
with research_object.write_bag_file("file.txt") as fh:
assert not fh.seekable()
- with pytest.raises(IOError):
+ with pytest.raises(OSError):
fh.seek(0)
def test_not_readable(research_object: ResearchObject) -> None:
with research_object.write_bag_file("file.txt") as fh:
assert not fh.readable()
- with pytest.raises(IOError):
+ with pytest.raises(OSError):
fh.read()
@@ -679,7 +676,7 @@ def test_truncate_fails(research_object: ResearchObject) -> None:
fh.write("Hello there")
fh.truncate() # OK as we're always at end
# Will fail because the checksum can't rewind
- with pytest.raises(IOError):
+ with pytest.raises(OSError):
fh.truncate(0)
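For orientation, the pathlib-ified helpers above all consume a research-object directory produced by cwltool's real `--provenance` option. A minimal sketch of that round trip, assuming `bagit` is installed (it is already a test dependency here) and using a placeholder workflow path:

```python
from pathlib import Path

import bagit

from cwltool.main import main


def run_with_provenance(tmp_path: Path, workflow: str) -> Path:
    """Run a workflow with provenance capture and sanity-check the RO bag."""
    ro_dir = tmp_path / "provenance"
    assert main(["--provenance", str(ro_dir), workflow]) == 0
    # The research object is a BagIt bag; bagit.Bag still expects a str path.
    bag = bagit.Bag(str(ro_dir))
    assert bag.has_oxum()
    assert bag.is_valid()
    return ro_dir
```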
diff --git a/tests/test_rdfprint.py b/tests/test_rdfprint.py
index 308265d913..63adb1c74b 100644
--- a/tests/test_rdfprint.py
+++ b/tests/test_rdfprint.py
@@ -1,7 +1,8 @@
-import os
import subprocess
import sys
+import pytest
+
from cwltool.main import main
from .util import get_data
@@ -11,21 +12,16 @@ def test_rdf_print() -> None:
assert main(["--print-rdf", get_data("tests/wf/hello_single_tool.cwl")]) == 0
-def test_rdf_print_unicode() -> None:
+def test_rdf_print_unicode(monkeypatch: pytest.MonkeyPatch) -> None:
"""Force ASCII encoding but load UTF file with --print-rdf."""
- try:
- lc_all = os.environ.get("LC_ALL", None)
- os.environ["LC_ALL"] = "C"
-
- params = [
- sys.executable,
- "-m",
- "cwltool",
- "--print-rdf",
- get_data("tests/utf_doc_example.cwl"),
- ]
-
- assert subprocess.check_call(params) == 0
- finally:
- if lc_all:
- os.environ["LC_ALL"] = lc_all
+ monkeypatch.setenv("LC_ALL", "C")
+
+ params = [
+ sys.executable,
+ "-m",
+ "cwltool",
+ "--print-rdf",
+ get_data("tests/utf_doc_example.cwl"),
+ ]
+
+ assert subprocess.check_call(params) == 0
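The monkeypatch rewrite above is the general replacement for hand-rolled save/restore of environment variables: pytest reverts every `setenv` at teardown, even when the test fails. The bare pattern, as a standalone sketch:

```python
import os

import pytest


def test_env_change_is_scoped(monkeypatch: pytest.MonkeyPatch) -> None:
    """LC_ALL is overridden for this test only; no try/finally needed."""
    monkeypatch.setenv("LC_ALL", "C")
    assert os.environ["LC_ALL"] == "C"
    # pytest restores the previous value (or absence) automatically
```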
diff --git a/tests/test_relax_path_checks.py b/tests/test_relax_path_checks.py
deleted file mode 100644
index f5e4579cf9..0000000000
--- a/tests/test_relax_path_checks.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-from tempfile import NamedTemporaryFile
-
-import py.path
-
-from cwltool.main import main
-
-from .util import needs_docker
-
-script = """
-#!/usr/bin/env cwl-runner
-cwlVersion: v1.0
-class: CommandLineTool
-inputs:
- - id: input
- type: File
- inputBinding:
- position: 0
-outputs:
- - id: output
- type: File
- outputBinding:
- glob: test.txt
-stdout: test.txt
-baseCommand: [cat]
-"""
-
-
-@needs_docker
-def test_spaces_in_input_files(tmpdir: py.path.local) -> None:
- try:
- script_file = NamedTemporaryFile(mode="w", delete=False)
- script_file.write(script)
- script_file.flush()
- script_file.close()
-
- spaces = NamedTemporaryFile(prefix="test with spaces", delete=False)
- spaces.close()
-
- params = [
- "--debug",
- "--outdir",
- str(tmpdir),
- script_file.name,
- "--input",
- spaces.name,
- ]
- assert main(params) == 1
- assert main(["--relax-path-checks"] + params) == 0
- finally:
- os.remove(script_file.name)
- os.remove(spaces.name)
diff --git a/tests/test_relocate.py b/tests/test_relocate.py
index a2d5872e4d..1d0ffb56cb 100644
--- a/tests/test_relocate.py
+++ b/tests/test_relocate.py
@@ -1,9 +1,10 @@
import json
import sys
+from pathlib import Path
from cwltool.main import main
-from .util import get_data, needs_docker, temp_dir
+from .util import get_data, needs_docker
if sys.version_info[0] < 3:
from StringIO import StringIO
@@ -18,17 +19,16 @@ def test_for_910() -> None:
@needs_docker
-def test_for_conflict_file_names() -> None:
+def test_for_conflict_file_names(tmp_path: Path) -> None:
stream = StringIO()
- with temp_dir() as tmp:
- assert (
- main(
- ["--debug", "--outdir", tmp, get_data("tests/wf/conflict.cwl")],
- stdout=stream,
- )
- == 0
+ assert (
+ main(
+ ["--debug", "--outdir", str(tmp_path), get_data("tests/wf/conflict.cwl")],
+ stdout=stream,
)
+ == 0
+ )
out = json.loads(stream.getvalue())
assert out["b1"]["basename"] == out["b2"]["basename"]
diff --git a/tests/test_schemadef.py b/tests/test_schemadef.py
new file mode 100644
index 0000000000..224ebe89f4
--- /dev/null
+++ b/tests/test_schemadef.py
@@ -0,0 +1,11 @@
+"""Tests related SchemaDefRequirement."""
+
+from cwltool.main import main
+
+from .util import get_data
+
+
+def test_schemadef() -> None:
+ """Confirm bug 1473 is fixed by checking that the test case validates."""
+ exit_code = main(["--validate", get_data("tests/wf/schemadef-bug-1473.cwl")])
+ assert exit_code == 0
diff --git a/tests/test_singularity.py b/tests/test_singularity.py
index 28fec9462f..2291b4a3de 100644
--- a/tests/test_singularity.py
+++ b/tests/test_singularity.py
@@ -1,9 +1,7 @@
-import distutils.spawn
-import os
-import sys
+"""Tests to find local Singularity image."""
+import shutil
from pathlib import Path
-
-import py.path
+from typing import Any
from cwltool.main import main
@@ -15,37 +13,35 @@
working_directory,
)
-sys.argv = [""]
-
@needs_singularity_2_6
-def test_singularity_pullfolder(tmp_path: Path) -> None:
+def test_singularity_pullfolder(tmp_path: Path, monkeypatch: Any) -> None:
+ """Test singularity respects SINGULARITY_PULLFOLDER."""
workdir = tmp_path / "working_dir_new"
workdir.mkdir()
- os.chdir(str(workdir))
- pullfolder = tmp_path / "pullfolder"
- pullfolder.mkdir()
- env = os.environ.copy()
- env["SINGULARITY_PULLFOLDER"] = str(pullfolder)
- result_code, stdout, stderr = get_main_output(
- [
- "--singularity",
- get_data("tests/sing_pullfolder_test.cwl"),
- "--message",
- "hello",
- ],
- env=env,
- )
- print(stdout)
- print(stderr)
- assert result_code == 0
- image = pullfolder / "debian.img"
- assert image.exists()
+ with working_directory(workdir):
+ pullfolder = tmp_path / "pullfolder"
+ pullfolder.mkdir()
+ result_code, stdout, stderr = get_main_output(
+ [
+ "--singularity",
+ get_data("tests/sing_pullfolder_test.cwl"),
+ "--message",
+ "hello",
+ ],
+ extra_env={"SINGULARITY_PULLFOLDER": str(pullfolder)},
+ monkeypatch=monkeypatch,
+ )
+ print(stdout)
+ print(stderr)
+ assert result_code == 0
+ image = pullfolder / "debian.img"
+ assert image.exists()
@needs_singularity
-def test_singularity_workflow(tmpdir: py.path.local) -> None:
- with working_directory(str(tmpdir)):
+def test_singularity_workflow(tmp_path: Path) -> None:
+ with working_directory(tmp_path):
error_code, _, stderr = get_main_output(
[
"--singularity",
@@ -72,7 +68,7 @@ def test_singularity_iwdr() -> None:
"hello",
]
)
- singularity_installed = bool(distutils.spawn.find_executable("singularity"))
+ singularity_installed = bool(shutil.which("singularity"))
if singularity_installed:
assert result_code == 0
else:
@@ -98,32 +94,37 @@ def test_singularity_incorrect_image_pull() -> None:
def test_singularity_local(tmp_path: Path) -> None:
workdir = tmp_path / "working_dir"
workdir.mkdir()
- os.chdir(str(workdir))
- result_code, stdout, stderr = get_main_output(
- [
- "--singularity",
- get_data("tests/sing_pullfolder_test.cwl"),
- "--message",
- "hello",
- ]
- )
- assert result_code == 0
+ with working_directory(workdir):
+ result_code, stdout, stderr = get_main_output(
+ [
+ "--singularity",
+ get_data("tests/sing_pullfolder_test.cwl"),
+ "--message",
+ "hello",
+ ]
+ )
+ assert result_code == 0
@needs_singularity_2_6
def test_singularity_docker_image_id_in_tool(tmp_path: Path) -> None:
workdir = tmp_path / "working_dir"
workdir.mkdir()
- os.chdir(str(workdir))
- result_code, stdout, stderr = get_main_output(
- [
- "--singularity",
- get_data("tests/sing_pullfolder_test.cwl"),
- "--message",
- "hello",
- ]
- )
- result_code1, stdout, stderr = get_main_output(
- ["--singularity", get_data("tests/debian_image_id.cwl"), "--message", "hello"]
- )
- assert result_code1 == 0
+ with working_directory(workdir):
+ result_code, stdout, stderr = get_main_output(
+ [
+ "--singularity",
+ get_data("tests/sing_pullfolder_test.cwl"),
+ "--message",
+ "hello",
+ ]
+ )
+ result_code1, stdout, stderr = get_main_output(
+ [
+ "--singularity",
+ get_data("tests/debian_image_id.cwl"),
+ "--message",
+ "hello",
+ ]
+ )
+ assert result_code1 == 0
diff --git a/tests/test_streaming.py b/tests/test_streaming.py
new file mode 100644
index 0000000000..3c5526592d
--- /dev/null
+++ b/tests/test_streaming.py
@@ -0,0 +1,108 @@
+"""Test that files marked as 'streamable' when 'streaming_allowed' can be named pipes."""
+import os
+from pathlib import Path
+from typing import cast
+
+import pytest
+from ruamel.yaml.comments import CommentedMap
+from schema_salad.sourceline import cmap
+
+from cwltool.command_line_tool import CommandLineTool
+from cwltool.context import LoadingContext, RuntimeContext
+from cwltool.errors import WorkflowException
+from cwltool.job import JobBase
+from cwltool.update import INTERNAL_VERSION, ORIGINAL_CWLVERSION
+from cwltool.utils import CWLObjectType
+
+from .util import get_data
+
+toolpath_object = cast(
+ CommentedMap,
+ cmap(
+ {
+ "cwlVersion": INTERNAL_VERSION,
+ "class": "CommandLineTool",
+ "inputs": [
+ {
+ "type": "File",
+ "id": "inp",
+ "streamable": True,
+ }
+ ],
+ "outputs": [],
+ "requirements": [],
+ }
+ ),
+)
+
+loading_context = LoadingContext(
+ {
+ "metadata": {
+ "cwlVersion": INTERNAL_VERSION,
+ ORIGINAL_CWLVERSION: INTERNAL_VERSION,
+ }
+ }
+)
+
+
+def test_regular_file() -> None:
+ """Test that regular files do not raise any exception when they are checked in job._setup."""
+ clt = CommandLineTool(
+ toolpath_object,
+ loading_context,
+ )
+ runtime_context = RuntimeContext()
+
+ joborder: CWLObjectType = {
+ "inp": {
+ "class": "File",
+ "location": get_data("tests/wf/whale.txt"),
+ }
+ }
+
+ job = next(clt.job(joborder, None, runtime_context))
+ assert isinstance(job, JobBase)
+
+ job._setup(runtime_context)
+
+
+streaming = [
+ (True, True, False),
+ (True, False, True),
+ (False, True, True),
+ (False, False, True),
+]
+
+
+@pytest.mark.parametrize("streamable,streaming_allowed,raise_exception", streaming)
+def test_input_can_be_named_pipe(
+ tmp_path: Path, streamable: bool, streaming_allowed: bool, raise_exception: bool
+) -> None:
+ """Test that input can be a named pipe."""
+ clt = CommandLineTool(
+ toolpath_object,
+ loading_context,
+ )
+
+ runtime_context = RuntimeContext()
+ runtime_context.streaming_allowed = streaming_allowed
+
+ path = tmp_path / "tmp"
+ os.mkfifo(path)
+
+ joborder: CWLObjectType = {
+ "inp": {
+ "class": "File",
+ "location": str(path),
+ "streamable": streamable,
+ }
+ }
+
+ job = next(clt.job(joborder, None, runtime_context))
+ assert isinstance(job, JobBase)
+
+ if raise_exception:
+ with pytest.raises(WorkflowException):
+ job._setup(runtime_context)
+ else:
+ job._setup(runtime_context)
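Background for the truth table above: a named pipe always reports zero size and cannot be reopened or rewound, so input validation in `job._setup` must reject it unless both `streamable` and `streaming_allowed` are set. A standalone sketch of the kind of file-type check involved (the exact check inside cwltool may differ):

```python
import os
import stat
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    fifo = os.path.join(tmp, "pipe")
    os.mkfifo(fifo)  # POSIX only
    mode = os.stat(fifo).st_mode
    assert stat.S_ISFIFO(mode)       # it is a named pipe...
    assert not stat.S_ISREG(mode)    # ...so a regular-file-only validator rejects it
    assert os.stat(fifo).st_size == 0
```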
diff --git a/tests/test_subgraph.py b/tests/test_subgraph.py
index a2731c2c39..ee4cb837d8 100644
--- a/tests/test_subgraph.py
+++ b/tests/test_subgraph.py
@@ -6,30 +6,32 @@
from cwltool.context import LoadingContext
from cwltool.load_tool import load_tool
-from cwltool.subgraph import get_subgraph
+from cwltool.subgraph import get_step, get_subgraph
from cwltool.workflow import Workflow, default_make_tool
-from .test_fetch import norm
from .util import get_data
+def clean(val: Any, path: str) -> Any:
+ """Remove the path prefix from an string values."""
+ if isinstance(val, str):
+ if val.startswith(path):
+ return val[len(path) + 1 :]
+ if isinstance(val, dict):
+ return {k: clean(v, path) for k, v in val.items()}
+ if isinstance(val, list):
+ return [clean(v, path) for v in val]
+ return val
+
+
def test_get_subgraph() -> None:
+ """Compare known correct subgraphs to generated subgraphs."""
loadingContext = LoadingContext({"construct_tool_object": default_make_tool})
- wf = norm(Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri())
+ wf = Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri()
loadingContext.do_update = False
tool = load_tool(wf, loadingContext)
- sg = norm(Path(get_data("tests/subgraph")).as_uri())
-
- def clean(val: Any) -> Any:
- if isinstance(val, str):
- if val.startswith(sg):
- return val[len(sg) + 1 :]
- if isinstance(val, dict):
- return {k: clean(v) for k, v in val.items()}
- if isinstance(val, list):
- return [clean(v) for v in val]
- return val
+ sg = Path(get_data("tests/subgraph")).as_uri()
for a in (
"file1",
@@ -48,4 +50,40 @@ def clean(val: Any) -> Any:
assert isinstance(tool, Workflow)
extracted = get_subgraph([wf + "#" + a], tool)
with open(get_data("tests/subgraph/extract_" + a + ".json")) as f:
- assert json.load(f) == clean(convert_to_dict(extracted))
+ assert json.load(f) == clean(convert_to_dict(extracted), sg)
+
+
+def test_get_subgraph_long_out_form() -> None:
+ """Compare subgraphs generatation when 'out' is in the long form."""
+ loadingContext = LoadingContext({"construct_tool_object": default_make_tool})
+ wf = Path(get_data("tests/subgraph/1432.cwl")).as_uri()
+ loadingContext.do_update = False
+ tool = load_tool(wf, loadingContext)
+
+ sg = Path(get_data("tests/")).as_uri()
+
+ assert isinstance(tool, Workflow)
+ extracted = get_subgraph([wf + "#step2"], tool)
+ with open(get_data("tests/subgraph/extract_step2_1432.json")) as f:
+ assert json.load(f) == clean(convert_to_dict(extracted), sg)
+
+
+def test_get_step() -> None:
+ """Extract a single step from a workflow and compare against known-good JSON."""
+ loadingContext = LoadingContext({"construct_tool_object": default_make_tool})
+ wf = Path(get_data("tests/subgraph/count-lines1-wf.cwl")).as_uri()
+ loadingContext.do_update = False
+ tool = load_tool(wf, loadingContext)
+ assert isinstance(tool, Workflow)
+
+ sg = Path(get_data("tests/subgraph")).as_uri()
+
+ for a in (
+ "step1",
+ "step2",
+ "step3",
+ "step4",
+ "step5",
+ ):
+ extracted = get_step(tool, wf + "#" + a)
+ with open(get_data("tests/subgraph/single_" + a + ".json")) as f:
+ assert json.load(f) == clean(convert_to_dict(extracted), sg)
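Since `clean()` is now shared by three tests, a quick usage note: it recursively strips a base-URI prefix (plus the joining slash) from every string in a nested structure, which keeps the JSON fixtures location-independent. For example (placeholder checkout path):

```python
prefix = "file:///checkout/tests/subgraph"
doc = {
    "run": prefix + "/wc-tool.cwl",
    "steps": [{"id": prefix + "/count-lines1-wf.cwl#step1"}],
}
assert clean(doc, prefix) == {
    "run": "wc-tool.cwl",
    "steps": [{"id": "count-lines1-wf.cwl#step1"}],
}
```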
diff --git a/tests/test_target.py b/tests/test_target.py
index 37e3c362ae..400027ec83 100644
--- a/tests/test_target.py
+++ b/tests/test_target.py
@@ -1,9 +1,8 @@
from cwltool.main import main
-from .util import get_data, windows_needs_docker
+from .util import get_data
-@windows_needs_docker
def test_target() -> None:
"""Test --target option successful."""
test_file = "tests/wf/scatter-wf4.cwl"
@@ -30,7 +29,6 @@ def test_wrong_target() -> None:
assert exit_code == 1
-@windows_needs_docker
def test_target_packed() -> None:
"""Test --target option with packed workflow schema."""
test_file = "tests/wf/scatter-wf4.json"
diff --git a/tests/test_tmpdir.py b/tests/test_tmpdir.py
index e7a9b61e36..90387b31ee 100644
--- a/tests/test_tmpdir.py
+++ b/tests/test_tmpdir.py
@@ -4,7 +4,7 @@
from pathlib import Path
from typing import List, cast
-from _pytest.monkeypatch import MonkeyPatch
+import pytest
from ruamel.yaml.comments import CommentedMap
from schema_salad.avro import schema
from schema_salad.sourceline import cmap
@@ -16,7 +16,7 @@
from cwltool.job import JobBase
from cwltool.pathmapper import MapperEnt, PathMapper
from cwltool.stdfsaccess import StdFsAccess
-from cwltool.update import INTERNAL_VERSION
+from cwltool.update import INTERNAL_VERSION, ORIGINAL_CWLVERSION
from cwltool.utils import create_tmp_dir
from .util import get_data, needs_docker
@@ -28,7 +28,7 @@ def test_docker_commandLineTool_job_tmpdir_prefix(tmp_path: Path) -> None:
{
"metadata": {
"cwlVersion": INTERNAL_VERSION,
- "http://commonwl.org/cwltool#original_cwlVersion": INTERNAL_VERSION,
+ ORIGINAL_CWLVERSION: INTERNAL_VERSION,
}
}
)
@@ -70,7 +70,7 @@ def test_commandLineTool_job_tmpdir_prefix(tmp_path: Path) -> None:
{
"metadata": {
"cwlVersion": INTERNAL_VERSION,
- "http://commonwl.org/cwltool#original_cwlVersion": INTERNAL_VERSION,
+ ORIGINAL_CWLVERSION: INTERNAL_VERSION,
}
}
)
@@ -105,7 +105,9 @@ def test_commandLineTool_job_tmpdir_prefix(tmp_path: Path) -> None:
@needs_docker
-def test_dockerfile_tmpdir_prefix(tmp_path: Path, monkeypatch: MonkeyPatch) -> None:
+def test_dockerfile_tmpdir_prefix(
+ tmp_path: Path, monkeypatch: pytest.MonkeyPatch
+) -> None:
"""Test that DockerCommandLineJob.get_image respects temp directory directives."""
monkeypatch.setattr(
target=subprocess, name="check_call", value=lambda *args, **kwargs: True
diff --git a/tests/test_toolargparse.py b/tests/test_toolargparse.py
index d05d5964be..1ac723d52e 100644
--- a/tests/test_toolargparse.py
+++ b/tests/test_toolargparse.py
@@ -1,10 +1,8 @@
import argparse
-import os
from io import StringIO
-from tempfile import NamedTemporaryFile
+from pathlib import Path
from typing import Callable
-import py.path
import pytest
import cwltool.executors
@@ -87,37 +85,32 @@
@needs_docker
@pytest.mark.parametrize("name,script_contents,params", scripts_argparse_params)
def test_argparse(
- name: str, script_contents: str, params: Callable[[str], str], tmpdir: py.path.local
+ name: str, script_contents: str, params: Callable[[str], str], tmp_path: Path
) -> None:
- script = None
+ script_name = tmp_path / "script"
try:
- script = NamedTemporaryFile(mode="w", delete=False)
- script.write(script_contents)
- script.close()
+ with script_name.open(mode="w") as script:
+ script.write(script_contents)
- my_params = ["--outdir", str(tmpdir)]
+ my_params = ["--outdir", str(tmp_path / "outdir")]
my_params.extend(params(script.name))
assert main(my_params) == 0, name
except SystemExit as err:
assert err.code == 0, name
- finally:
- if script and script.name and os.path.exists(script.name):
- os.unlink(script.name)
-def test_dont_require_inputs() -> None:
+def test_dont_require_inputs(tmp_path: Path) -> None:
stream = StringIO()
- script = None
+ script_name = tmp_path / "script"
try:
- script = NamedTemporaryFile(mode="w", delete=False)
- script.write(script_a)
- script.close()
+ with script_name.open(mode="w") as script:
+ script.write(script_a)
assert (
main(
- argsl=["--debug", script.name, "--input", script.name],
+ argsl=["--debug", str(script_name), "--input", str(script_name)],
executor=cwltool.executors.NoopJobExecutor(),
stdout=stream,
)
@@ -125,7 +118,7 @@ def test_dont_require_inputs() -> None:
)
assert (
main(
- argsl=["--debug", script.name],
+ argsl=["--debug", str(script_name)],
executor=cwltool.executors.NoopJobExecutor(),
stdout=stream,
)
@@ -133,7 +126,7 @@ def test_dont_require_inputs() -> None:
)
assert (
main(
- argsl=["--debug", script.name],
+ argsl=["--debug", str(script_name)],
executor=cwltool.executors.NoopJobExecutor(),
input_required=False,
stdout=stream,
@@ -142,10 +135,7 @@ def test_dont_require_inputs() -> None:
)
except SystemExit as err:
- assert err.code == 0, script.name if script else None
- finally:
- if script and script.name and os.path.exists(script.name):
- os.unlink(script.name)
+ assert err.code == 0, script_name
def test_argparser_with_doc() -> None:
diff --git a/tests/test_trs.py b/tests/test_trs.py
index 7ddd6a8629..acfee2f4ce 100644
--- a/tests/test_trs.py
+++ b/tests/test_trs.py
@@ -1,8 +1,7 @@
from typing import Any, Optional
+from unittest import mock
from unittest.mock import MagicMock
-import mock
-
from cwltool.main import main
from .util import get_data
@@ -22,7 +21,7 @@ def json(self) -> Any:
return self.json_data
-def mocked_requests_head(*args): # type: (*Any) -> MockResponse1
+def mocked_requests_head(*args: Any) -> MockResponse1:
return MockResponse1(None, 200)
@@ -41,9 +40,10 @@ def __init__(
def json(self) -> Any:
return self.json_data
+ headers = {"content-type": "text/plain"}
-def mocked_requests_get(*args): # type: (*Any) -> MockResponse2
+def mocked_requests_get(*args: Any, **kwargs: Any) -> MockResponse2:
if (
args[0]
== "https://dockstore.org/api/api/ga4gh/v2/tools/quay.io%2Fbriandoconnor%2Fdockstore-tool-md5sum/versions/1.0.4/CWL/files"
@@ -60,19 +60,19 @@ def mocked_requests_get(*args): # type: (*Any) -> MockResponse2
args[0]
== "https://dockstore.org/api/api/ga4gh/v2/tools/quay.io%2Fbriandoconnor%2Fdockstore-tool-md5sum/versions/1.0.4/plain-CWL/descriptor/Dockstore.cwl"
):
- string = open(get_data("tests/trs/Dockstore.cwl"), "r").read()
+ string = open(get_data("tests/trs/Dockstore.cwl")).read()
return MockResponse2(string, 200)
elif (
args[0]
== "https://dockstore.org/api/api/ga4gh/v2/tools/%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/plain-CWL/descriptor/md5sum-tool.cwl"
):
- string = open(get_data("tests/trs/md5sum-tool.cwl"), "r").read()
+ string = open(get_data("tests/trs/md5sum-tool.cwl")).read()
return MockResponse2(string, 200)
elif (
args[0]
== "https://dockstore.org/api/api/ga4gh/v2/tools/%23workflow%2Fgithub.com%2Fdockstore-testing%2Fmd5sum-checker/versions/develop/plain-CWL/descriptor/md5sum-workflow.cwl"
):
- string = open(get_data("tests/trs/md5sum-workflow.cwl"), "r").read()
+ string = open(get_data("tests/trs/md5sum-workflow.cwl")).read()
return MockResponse2(string, 200)
elif (
args[0]
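These canned responses are typically wired in with `unittest.mock.patch` so the TRS lookups in `main()` never touch the network. A sketch of the wiring, under the assumption that the surrounding tests patch `requests` at module level (decorator stacking puts the bottom-most mock first in the argument list):

```python
from unittest import mock

from cwltool.main import main


@mock.patch("requests.get", side_effect=mocked_requests_get)
@mock.patch("requests.head", side_effect=mocked_requests_head)
def test_tool_trs_fetch(head_mock: mock.MagicMock, get_mock: mock.MagicMock) -> None:
    # hypothetical invocation: the TRS id resolves via the mocked Dockstore API
    params = ["--validate", "quay.io/briandoconnor/dockstore-tool-md5sum:1.0.4"]
    assert main(params) == 0
```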
diff --git a/tests/test_udocker.py b/tests/test_udocker.py
index cad2f948fc..f8febc1a0e 100644
--- a/tests/test_udocker.py
+++ b/tests/test_udocker.py
@@ -1,23 +1,14 @@
"""Test optional udocker feature."""
import copy
import os
-import shutil
import subprocess
import sys
-
-try:
- from psutil.tests import TRAVIS # type: ignore
-except ImportError:
- TRAVIS = True
-
-
from pathlib import Path
-from typing import Generator
import pytest
from _pytest.tmpdir import TempPathFactory
-from .util import get_data, get_main_output
+from .util import get_data, get_main_output, working_directory
LINUX = sys.platform in ("linux", "linux2")
@@ -28,84 +19,79 @@ def udocker(tmp_path_factory: TempPathFactory) -> str:
test_cwd = os.getcwd()
test_environ = copy.copy(os.environ)
docker_install_dir = str(tmp_path_factory.mktemp("udocker"))
- os.chdir(docker_install_dir)
-
- url = "https://raw.githubusercontent.com/jorge-lip/udocker-builds/master/tarballs/udocker-1.1.4.tar.gz"
- install_cmds = [
- ["curl", url, "-o", "./udocker-tarball.tgz"],
- ["tar", "xzvf", "udocker-tarball.tgz", "udocker"],
- [
- "bash",
- "-c",
- "UDOCKER_TARBALL={}/udocker-tarball.tgz ./udocker install".format(
- docker_install_dir
- ),
- ],
- ]
-
- os.environ["UDOCKER_DIR"] = os.path.join(docker_install_dir, ".udocker")
- os.environ["HOME"] = docker_install_dir
-
- results = []
- for _ in range(3):
- results = [subprocess.call(cmds) for cmds in install_cmds]
- if sum(results) == 0:
- break
- subprocess.call(["rm", "./udocker"])
-
- assert sum(results) == 0
-
- udocker_path = os.path.join(docker_install_dir, "udocker")
- os.chdir(test_cwd)
- os.environ = test_environ
+ with working_directory(docker_install_dir):
+
+ url = "https://raw.githubusercontent.com/jorge-lip/udocker-builds/master/tarballs/udocker-1.1.4.tar.gz"
+ install_cmds = [
+ ["curl", url, "-o", "./udocker-tarball.tgz"],
+ ["tar", "xzvf", "udocker-tarball.tgz", "udocker"],
+ [
+ "bash",
+ "-c",
+ "UDOCKER_TARBALL={}/udocker-tarball.tgz ./udocker install".format(
+ docker_install_dir
+ ),
+ ],
+ ]
+
+ test_environ["UDOCKER_DIR"] = os.path.join(docker_install_dir, ".udocker")
+ test_environ["HOME"] = docker_install_dir
+
+ results = []
+ for _ in range(3):
+ results = [subprocess.call(cmds, env=test_environ) for cmds in install_cmds]
+ if sum(results) == 0:
+ break
+ subprocess.call(["rm", "./udocker"])
+
+ assert sum(results) == 0
+
+ udocker_path = os.path.join(docker_install_dir, "udocker")
+
return udocker_path
@pytest.mark.skipif(not LINUX, reason="LINUX only")
def test_udocker_usage_should_not_write_cid_file(udocker: str, tmp_path: Path) -> None:
"""Confirm that no cidfile is made when udocker is used."""
- cwd = Path.cwd()
- os.chdir(tmp_path)
-
- test_file = "tests/wf/wc-tool.cwl"
- job_file = "tests/wf/wc-job.json"
- error_code, stdout, stderr = get_main_output(
- [
- "--debug",
- "--default-container",
- "debian",
- "--user-space-docker-cmd=" + udocker,
- get_data(test_file),
- get_data(job_file),
- ]
- )
- cidfiles_count = sum(1 for _ in tmp_path.glob("*.cid"))
- os.chdir(cwd)
+ with working_directory(tmp_path):
+ test_file = "tests/wf/wc-tool.cwl"
+ job_file = "tests/wf/wc-job.json"
+ error_code, stdout, stderr = get_main_output(
+ [
+ "--debug",
+ "--default-container",
+ "debian",
+ "--user-space-docker-cmd=" + udocker,
+ get_data(test_file),
+ get_data(job_file),
+ ]
+ )
+
+ cidfiles_count = sum(1 for _ in tmp_path.glob("*.cid"))
assert "completed success" in stderr, stderr
assert cidfiles_count == 0
@pytest.mark.skipif(
- not LINUX or TRAVIS,
- reason="Linux only & not reliable on single threaded test on Travis-CI.",
+ not LINUX or "GITHUB" in os.environ,
+ reason="Linux only",
)
def test_udocker_should_display_memory_usage(udocker: str, tmp_path: Path) -> None:
"""Confirm that memory ussage is logged even with udocker."""
- cwd = Path.cwd()
- os.chdir(tmp_path)
- error_code, stdout, stderr = get_main_output(
- [
- "--enable-ext",
- "--default-container=debian",
- "--user-space-docker-cmd=" + udocker,
- get_data("tests/wf/timelimit.cwl"),
- "--sleep_time",
- "10",
- ]
- )
- os.chdir(cwd)
+ with working_directory(tmp_path):
+ error_code, stdout, stderr = get_main_output(
+ [
+ "--enable-ext",
+ "--default-container=debian",
+ "--user-space-docker-cmd=" + udocker,
+ get_data("tests/wf/timelimit.cwl"),
+ "--sleep_time",
+ "10",
+ ]
+ )
assert "completed success" in stderr, stderr
assert "Max memory" in stderr, stderr
diff --git a/tests/test_validate_js.py b/tests/test_validate_js.py
index 8a8ce75618..7047d1dc3f 100644
--- a/tests/test_validate_js.py
+++ b/tests/test_validate_js.py
@@ -1,7 +1,6 @@
-from typing import Any
-
-from ruamel import yaml
+import pytest
from schema_salad.avro.schema import Names
+from schema_salad.utils import yaml_no_ts
from cwltool import process, validate_js
from cwltool.sandboxjs import code_fragment_to_js
@@ -24,27 +23,32 @@
def test_get_expressions() -> None:
- test_cwl_yaml = yaml.main.round_trip_load(TEST_CWL)
+ yaml = yaml_no_ts()
+ test_cwl_yaml = yaml.load(TEST_CWL)
schema = process.get_schema("v1.0")[1]
assert isinstance(schema, Names)
- clt_schema = schema.names["CommandLineTool"]
+ clt_schema = schema.names["org.w3id.cwl.cwl.CommandLineTool"]
exprs = validate_js.get_expressions(test_cwl_yaml, clt_schema)
assert len(exprs) == 1
-def test_validate_js_expressions(mocker: Any) -> None:
- test_cwl_yaml = yaml.main.round_trip_load(TEST_CWL)
+def test_validate_js_expressions(caplog: pytest.LogCaptureFixture) -> None:
+ """Test invalid JS expression."""
+ yaml = yaml_no_ts()
+ test_cwl_yaml = yaml.load(TEST_CWL)
schema = process.get_schema("v1.0")[1]
assert isinstance(schema, Names)
- clt_schema = schema.names["CommandLineTool"]
+ clt_schema = schema.names["org.w3id.cwl.cwl.CommandLineTool"]
- mocker.patch("cwltool.validate_js._logger")
- # mocker.patch("cwltool.validate_js.print_js_hint_messages")
validate_js.validate_js_expressions(test_cwl_yaml, clt_schema)
- validate_js._logger.warning.assert_called_with(" JSHINT: (function(){return ((kjdbfkjd));})()\n JSHINT: ^\n JSHINT: W117: 'kjdbfkjd' is not defined.") # type: ignore
+ assert (
+ " JSHINT: (function(){return ((kjdbfkjd));})()\n"
+ " JSHINT: ^\n"
+ " JSHINT: W117: 'kjdbfkjd' is not defined."
+ ) in caplog.text
def test_js_hint_basic() -> None:
diff --git a/tests/test_windows_warning.py b/tests/test_windows_warning.py
new file mode 100644
index 0000000000..a480f8fb01
--- /dev/null
+++ b/tests/test_windows_warning.py
@@ -0,0 +1,22 @@
+"""Test user experience running on MS Windows."""
+
+import os
+
+import pytest
+
+from cwltool import main
+
+# This can't be just "import cwltool ; … cwltool.main.windows_check()":
+# a direct import is needed to avoid module lookup through the package
+# after os.name has been set to "nt".
+
+
+def test_windows_warning(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Confirm that the windows warning is given."""
+ with pytest.warns(UserWarning, match=r"Windows Subsystem for Linux 2"):
+ # would normally just use the MonkeyPatch object directly
+ # but if we don't use a context then os.name being "nt" causes problems
+ # for pytest on non-Windows systems. So the context unravels the change
+ # to os.name quickly, and then pytest will check for the desired warning
+ with monkeypatch.context() as m:
+ m.setattr(os, "name", "nt")
+ main.windows_check()
diff --git a/tests/util.py b/tests/util.py
index f79fdefdc5..fbca2fb151 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -1,38 +1,18 @@
import contextlib
-import distutils.spawn # pylint: disable=no-name-in-module,import-error
-import functools
+import io
+import json
import os
import shutil
import subprocess
-import sys
-import tempfile
from pathlib import Path
-from typing import Generator, List, Mapping, Optional, Tuple, Union
+from typing import Dict, Generator, List, Mapping, Optional, Tuple, Union
import pytest
from pkg_resources import Requirement, ResolutionError, resource_filename
-from cwltool.context import LoadingContext, RuntimeContext
-from cwltool.executors import JobExecutor
-from cwltool.factory import Factory
+from cwltool.env_to_stdout import deserialize_env
+from cwltool.main import main
from cwltool.singularity import is_version_2_6, is_version_3_or_newer
-from cwltool.utils import onWindows, windows_default_container_id
-
-
-def get_windows_safe_factory(
- runtime_context: Optional[RuntimeContext] = None,
- loading_context: Optional[LoadingContext] = None,
- executor: Optional[JobExecutor] = None,
-) -> Factory:
- if onWindows():
- if not runtime_context:
- runtime_context = RuntimeContext()
- runtime_context.find_default_container = functools.partial(
- force_default_container, windows_default_container_id
- )
- runtime_context.use_container = True
- runtime_context.default_container = windows_default_container_id
- return Factory(executor, loading_context, runtime_context)
def force_default_container(default_container_id: str, _: str) -> str:
@@ -53,71 +33,130 @@ def get_data(filename: str) -> str:
needs_docker = pytest.mark.skipif(
- not bool(distutils.spawn.find_executable("docker")),
+ not bool(shutil.which("docker")),
reason="Requires the docker executable on the system path.",
)
needs_singularity = pytest.mark.skipif(
- not bool(distutils.spawn.find_executable("singularity")),
+ not bool(shutil.which("singularity")),
reason="Requires the singularity executable on the system path.",
)
needs_singularity_2_6 = pytest.mark.skipif(
- not bool(distutils.spawn.find_executable("singularity") and is_version_2_6()),
+ not bool(shutil.which("singularity") and is_version_2_6()),
reason="Requires that version 2.6.x of singularity executable version is on the system path.",
)
needs_singularity_3_or_newer = pytest.mark.skipif(
- (not bool(distutils.spawn.find_executable("singularity")))
- or (not is_version_3_or_newer()),
+ (not bool(shutil.which("singularity"))) or (not is_version_3_or_newer()),
reason="Requires that version 3.x of singularity executable version is on the system path.",
)
+_env_accepts_null: Optional[bool] = None
-windows_needs_docker = pytest.mark.skipif(
- onWindows() and not bool(distutils.spawn.find_executable("docker")),
- reason="Running this test on MS Windows requires the docker executable "
- "on the system path.",
-)
+
+def env_accepts_null() -> bool:
+ """Return True iff the env command on this host accepts `-0`."""
+ global _env_accepts_null
+ if _env_accepts_null is None:
+ result = subprocess.run(
+ ["env", "-0"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ encoding="utf-8",
+ )
+ _env_accepts_null = result.returncode == 0
+
+ return _env_accepts_null
def get_main_output(
args: List[str],
- env: Union[
- Mapping[bytes, Union[bytes, str]], Mapping[str, Union[bytes, str]], None
- ] = None,
+ replacement_env: Optional[Mapping[str, str]] = None,
+ extra_env: Optional[Mapping[str, str]] = None,
+ monkeypatch: Optional[pytest.MonkeyPatch] = None,
) -> Tuple[Optional[int], str, str]:
- process = subprocess.Popen(
- [sys.executable, "-m", "cwltool"] + args,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=env,
- )
+ """Run cwltool main.
+
+ args: the command line args to call it with
+
+ replacement_env: a total replacement of the environment
+
+ extra_env: add these to the environment used
- stdout, stderr = process.communicate()
+ monkeypatch: required if changing the environment
+
+ Returns (return code, stdout, stderr)
+ """
+ stdout = io.StringIO()
+ stderr = io.StringIO()
+ if replacement_env is not None:
+ assert monkeypatch is not None
+ monkeypatch.setattr(os, "environ", replacement_env)
+
+ if extra_env is not None:
+ assert monkeypatch is not None
+ for k, v in extra_env.items():
+ monkeypatch.setenv(k, v)
+
+ try:
+ rc = main(argsl=args, stdout=stdout, stderr=stderr)
+ except SystemExit as e:
+ rc = e.code
return (
- process.returncode,
- stdout.decode() if stdout else "",
- stderr.decode() if stderr else "",
+ rc,
+ stdout.getvalue(),
+ stderr.getvalue(),
)
-@contextlib.contextmanager
-def temp_dir(suffix: str = "") -> Generator[str, None, None]:
- c_dir = tempfile.mkdtemp(suffix, dir=os.curdir)
- try:
- yield c_dir
- finally:
- shutil.rmtree(c_dir, ignore_errors=True)
+def get_tool_env(
+ tmp_path: Path,
+ flag_args: List[str],
+ inputs_file: Optional[str] = None,
+ replacement_env: Optional[Mapping[str, str]] = None,
+ extra_env: Optional[Mapping[str, str]] = None,
+ monkeypatch: Optional[pytest.MonkeyPatch] = None,
+ runtime_env_accepts_null: Optional[bool] = None,
+) -> Dict[str, str]:
+ """Get the env vars for a tool's invocation."""
+ # GNU env accepts the -0 option to end each variable's
+ # printing with "\0". No such luck on BSD-ish.
+ #
+ # runtime_env_accepts_null is None => autodetect on this host; otherwise
+ # trust the caller's value (the tool may run in a container whose env differs).
+ if runtime_env_accepts_null is None:
+ runtime_env_accepts_null = env_accepts_null()
+
+ args = flag_args.copy()
+ if runtime_env_accepts_null:
+ args.append(get_data("tests/env3.cwl"))
+ else:
+ args.append(get_data("tests/env4.cwl"))
+
+ if inputs_file is not None:
+ args.append(inputs_file)
+
+ with working_directory(tmp_path):
+ rc, stdout, _ = get_main_output(
+ args,
+ replacement_env=replacement_env,
+ extra_env=extra_env,
+ monkeypatch=monkeypatch,
+ )
+ assert rc == 0
+
+ output = json.loads(stdout)
+ with open(output["env"]["path"]) as _:
+ return deserialize_env(_.read())
@contextlib.contextmanager
def working_directory(path: Union[str, Path]) -> Generator[None, None, None]:
"""Change working directory and returns to previous on exit."""
prev_cwd = Path.cwd()
- # before python 3.6 chdir doesn't support paths from pathlib
- os.chdir(str(path))
+ os.chdir(path)
try:
yield
finally:
- os.chdir(str(prev_cwd))
+ os.chdir(prev_cwd)
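Since `get_main_output` now calls `main()` in-process instead of spawning `python -m cwltool`, environment manipulation has to flow through the pytest `monkeypatch` fixture (hence the asserts above). A sketch of a typical call site, using a real fixture file from this test suite:

```python
from pathlib import Path

import pytest

from .util import get_data, get_main_output, get_tool_env


def test_util_helpers(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
    rc, stdout, stderr = get_main_output(
        ["--validate", get_data("tests/wf/hello_single_tool.cwl")],
        extra_env={"LC_ALL": "C"},  # any env change requires monkeypatch
        monkeypatch=monkeypatch,
    )
    assert rc == 0, stderr

    # get_tool_env runs an env-dumping tool and parses it via deserialize_env
    tool_env = get_tool_env(tmp_path, [], monkeypatch=monkeypatch)
    assert "PATH" in tool_env
```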
diff --git a/tests/wc-tool-bad-hints.cwl b/tests/wc-tool-bad-hints.cwl
new file mode 100644
index 0000000000..bbde0e2110
--- /dev/null
+++ b/tests/wc-tool-bad-hints.cwl
@@ -0,0 +1,19 @@
+#!/usr/bin/env cwl-runner
+
+class: CommandLineTool
+cwlVersion: v1.1
+
+hints:
+
+inputs:
+ file1: File
+
+outputs:
+ output:
+ type: File
+ outputBinding: { glob: output }
+
+baseCommand: [wc, -l]
+
+stdin: $(inputs.file1.path)
+stdout: output
diff --git a/tests/wc-tool-bad-reqs.cwl b/tests/wc-tool-bad-reqs.cwl
new file mode 100644
index 0000000000..96e879b69c
--- /dev/null
+++ b/tests/wc-tool-bad-reqs.cwl
@@ -0,0 +1,19 @@
+#!/usr/bin/env cwl-runner
+
+class: CommandLineTool
+cwlVersion: v1.1
+
+requirements:
+
+inputs:
+ file1: File
+
+outputs:
+ output:
+ type: File
+ outputBinding: { glob: output }
+
+baseCommand: [wc, -l]
+
+stdin: $(inputs.file1.path)
+stdout: output
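Both `wc-tool-bad-*.cwl` fixtures carry a deliberately empty `hints:`/`requirements:` block, which YAML loads as null; presumably they back validation tests asserting that cwltool reports a clean error instead of crashing. A hypothetical sketch of that expectation:

```python
from cwltool.main import main

from .util import get_data


def test_empty_hints_block_is_rejected() -> None:
    # hypothetical check: an empty hints: section should fail validation, not crash
    assert main(["--validate", get_data("tests/wc-tool-bad-hints.cwl")]) == 1
```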
diff --git a/tests/wf/1496.cwl b/tests/wf/1496.cwl
new file mode 100644
index 0000000000..74f6ef49e3
--- /dev/null
+++ b/tests/wf/1496.cwl
@@ -0,0 +1,15 @@
+cwlVersion: v1.2
+class: CommandLineTool
+
+baseCommand: echo
+
+inputs:
+ index:
+ type: Directory
+ inputBinding: {}
+
+outputs:
+ salmon_index:
+ type: Directory
+ outputBinding:
+ glob: "$(inputs.index)" # not a valid glob, result needs to be a string, not a Directory object
diff --git a/tests/wf/bad-stderr-expr.cwl b/tests/wf/bad-stderr-expr.cwl
new file mode 100644
index 0000000000..cfb1f06d9b
--- /dev/null
+++ b/tests/wf/bad-stderr-expr.cwl
@@ -0,0 +1,18 @@
+#!/usr/bin/env cwl-runner
+
+class: CommandLineTool
+cwlVersion: v1.0
+
+inputs:
+ file1: File
+
+outputs:
+ output:
+ type: File
+ outputBinding: { glob: output }
+
+baseCommand: [cat]
+
+stdin: $(inputs.file1.path)
+stdout: output
+stderr: $(inputs.file1.size)
diff --git a/tests/wf/bad-stdin-expr.cwl b/tests/wf/bad-stdin-expr.cwl
new file mode 100644
index 0000000000..b3638c68f5
--- /dev/null
+++ b/tests/wf/bad-stdin-expr.cwl
@@ -0,0 +1,17 @@
+#!/usr/bin/env cwl-runner
+
+class: CommandLineTool
+cwlVersion: v1.0
+
+inputs:
+ file1: File
+
+outputs:
+ output:
+ type: File
+ outputBinding: { glob: output }
+
+baseCommand: [cat]
+
+stdin: $(inputs.file1.size)
+stdout: output
diff --git a/tests/wf/bad-stdout-expr.cwl b/tests/wf/bad-stdout-expr.cwl
new file mode 100644
index 0000000000..29c109675d
--- /dev/null
+++ b/tests/wf/bad-stdout-expr.cwl
@@ -0,0 +1,17 @@
+#!/usr/bin/env cwl-runner
+
+class: CommandLineTool
+cwlVersion: v1.0
+
+inputs:
+ file1: File
+
+outputs:
+ output:
+ type: File
+ outputBinding: { glob: output }
+
+baseCommand: [cat]
+
+stdin: $(inputs.file1.path)
+stdout: $(inputs.file1.size)
diff --git a/tests/wf/bad_formattest.cwl b/tests/wf/bad_formattest.cwl
new file mode 100755
index 0000000000..6d936f60d8
--- /dev/null
+++ b/tests/wf/bad_formattest.cwl
@@ -0,0 +1,25 @@
+#!/usr/bin/env cwl-runner
+$namespaces:
+ edam: "http://edamontology.org/"
+cwlVersion: v1.0
+class: CommandLineTool
+requirements:
+ InlineJavascriptRequirement: {}
+doc: "Reverse each line using the `rev` command"
+inputs:
+ input:
+ type: File
+ inputBinding: {}
+ format: |
+ ${ return ["http://edamontology.org/format_2330", 42];}
+
+outputs:
+ output:
+ type: File
+ outputBinding:
+ glob: output.txt
+ format: |
+ ${return "http://edamontology.org/format_2330";}
+
+baseCommand: rev
+stdout: output.txt
diff --git a/tests/wf/bad_networkaccess.cwl b/tests/wf/bad_networkaccess.cwl
new file mode 100644
index 0000000000..e30c81a5ac
--- /dev/null
+++ b/tests/wf/bad_networkaccess.cwl
@@ -0,0 +1,10 @@
+#!/usr/bin/env cwl-runner
+class: CommandLineTool
+cwlVersion: v1.1
+requirements:
+ InlineJavascriptRequirement: {}
+ NetworkAccess:
+ networkAccess: '${return 42;}'
+inputs: []
+outputs: []
+baseCommand: echo, Hello, World!
diff --git a/tests/wf/bad_timelimit.cwl b/tests/wf/bad_timelimit.cwl
new file mode 100644
index 0000000000..1dd0308bc5
--- /dev/null
+++ b/tests/wf/bad_timelimit.cwl
@@ -0,0 +1,14 @@
+#!/usr/bin/env cwl-runner
+class: CommandLineTool
+cwlVersion: v1.1
+inputs:
+ sleep_time:
+ type: int
+ default: 3
+ inputBinding: {}
+outputs: []
+requirements:
+ InlineJavascriptRequirement: {}
+ ToolTimeLimit:
+ timelimit: '${return "42";}'
+baseCommand: sleep
diff --git a/tests/wf/conflict.cwl b/tests/wf/conflict.cwl
index 9f052cbc3c..21ed20b1fe 100644
--- a/tests/wf/conflict.cwl
+++ b/tests/wf/conflict.cwl
@@ -1,3 +1,4 @@
+#!/usr/bin/env cwl-runner
cwlVersion: v1.1
$graph:
- class: CommandLineTool
diff --git a/tests/wf/expect_packed.cwl b/tests/wf/expect_packed.cwl
index 9f0be2c862..a816e4d2cc 100644
--- a/tests/wf/expect_packed.cwl
+++ b/tests/wf/expect_packed.cwl
@@ -1,4 +1,3 @@
-#!/usr/bin/env cwl-runner
{
"$graph": [
{
@@ -68,10 +67,7 @@
"id": "#main/sorted"
}
],
- "id": "#main",
- "$namespaces": {
- "iana": "https://www.iana.org/assignments/media-types/"
- }
+ "id": "#main"
},
{
"class": "CommandLineTool",
@@ -134,5 +130,8 @@
"$schemas": [
"empty.ttl",
"empty2.ttl"
- ]
+ ],
+ "$namespaces": {
+ "iana": "https://www.iana.org/assignments/media-types/"
+ }
}
diff --git a/tests/wf/expect_revsort_datetime_packed.cwl b/tests/wf/expect_revsort_datetime_packed.cwl
new file mode 100644
index 0000000000..172a030b04
--- /dev/null
+++ b/tests/wf/expect_revsort_datetime_packed.cwl
@@ -0,0 +1,140 @@
+{
+ "$graph": [
+ {
+ "class": "Workflow",
+ "doc": "Reverse the lines in a document, then sort those lines.",
+ "hints": [
+ {
+ "class": "DockerRequirement",
+ "dockerPull": "debian:8"
+ }
+ ],
+ "inputs": [
+ {
+ "type": "boolean",
+ "default": true,
+ "doc": "If true, reverse (decending) sort",
+ "id": "#main/reverse_sort"
+ },
+ {
+ "type": "File",
+ "doc": "The input file to be processed.",
+ "format": "https://www.iana.org/assignments/media-types/text/plain",
+ "default": {
+ "class": "File",
+ "location": "hello.txt"
+ },
+ "id": "#main/workflow_input"
+ }
+ ],
+ "outputs": [
+ {
+ "type": "File",
+ "outputSource": "#main/sorted/sorted_output",
+ "doc": "The output with the lines reversed and sorted.",
+ "id": "#main/sorted_output"
+ }
+ ],
+ "steps": [
+ {
+ "in": [
+ {
+ "source": "#main/workflow_input",
+ "id": "#main/rev/revtool_input"
+ }
+ ],
+ "out": [
+ "#main/rev/revtool_output"
+ ],
+ "run": "#revtool.cwl",
+ "id": "#main/rev"
+ },
+ {
+ "in": [
+ {
+ "source": "#main/reverse_sort",
+ "id": "#main/sorted/reverse"
+ },
+ {
+ "source": "#main/rev/revtool_output",
+ "id": "#main/sorted/sorted_input"
+ }
+ ],
+ "out": [
+ "#main/sorted/sorted_output"
+ ],
+ "run": "#sorttool.cwl",
+ "id": "#main/sorted"
+ }
+ ],
+ "id": "#main",
+ "http://schema.org/dateCreated": "2020-10-08"
+ },
+ {
+ "class": "CommandLineTool",
+ "doc": "Reverse each line using the `rev` command",
+ "inputs": [
+ {
+ "type": "File",
+ "inputBinding": {},
+ "id": "#revtool.cwl/revtool_input"
+ }
+ ],
+ "outputs": [
+ {
+ "type": "File",
+ "outputBinding": {
+ "glob": "output.txt"
+ },
+ "id": "#revtool.cwl/revtool_output"
+ }
+ ],
+ "baseCommand": "rev",
+ "stdout": "output.txt",
+ "id": "#revtool.cwl"
+ },
+ {
+ "class": "CommandLineTool",
+ "doc": "Sort lines using the `sort` command",
+ "inputs": [
+ {
+ "id": "#sorttool.cwl/reverse",
+ "type": "boolean",
+ "inputBinding": {
+ "position": 1,
+ "prefix": "--reverse"
+ }
+ },
+ {
+ "id": "#sorttool.cwl/sorted_input",
+ "type": "File",
+ "inputBinding": {
+ "position": 2
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "id": "#sorttool.cwl/sorted_output",
+ "type": "File",
+ "outputBinding": {
+ "glob": "output.txt"
+ }
+ }
+ ],
+ "baseCommand": "sort",
+ "stdout": "output.txt",
+ "id": "#sorttool.cwl"
+ }
+ ],
+ "cwlVersion": "v1.0",
+ "$schemas": [
+ "empty2.ttl",
+ "https://schema.org/version/latest/schemaorg-current-https.rdf",
+ "empty.ttl"
+ ],
+ "$namespaces": {
+ "iana": "https://www.iana.org/assignments/media-types/",
+ "s": "http://schema.org/"
+ }
+}
diff --git a/tests/wf/expect_trick_packed.cwl b/tests/wf/expect_trick_packed.cwl
index b946546390..1b2a0f937a 100644
--- a/tests/wf/expect_trick_packed.cwl
+++ b/tests/wf/expect_trick_packed.cwl
@@ -21,10 +21,7 @@
],
"baseCommand": "rev",
"stdout": "output.txt",
- "id": "#revtool.cwl",
- "$namespaces": {
- "iana": "https://www.iana.org/assignments/media-types/"
- }
+ "id": "#revtool.cwl"
},
{
"class": "CommandLineTool",
@@ -138,5 +135,8 @@
"$schemas": [
"empty2.ttl",
"empty.ttl"
- ]
-}
\ No newline at end of file
+ ],
+ "$namespaces": {
+ "iana": "https://www.iana.org/assignments/media-types/"
+ }
+}
diff --git a/tests/wf/generator/pytoolgen.cwl b/tests/wf/generator/pytoolgen.cwl
index 8e39c3c394..3d8ce78797 100644
--- a/tests/wf/generator/pytoolgen.cwl
+++ b/tests/wf/generator/pytoolgen.cwl
@@ -1,7 +1,8 @@
+#!/usr/bin/env cwl-runner
cwlVersion: v1.0
$namespaces:
cwltool: "http://commonwl.org/cwltool#"
-class: ProcessGenerator
+class: cwltool:ProcessGenerator
inputs:
script: string
dir: Directory
diff --git a/tests/wf/indir/hello2.txt b/tests/wf/indir/hello2.txt
new file mode 100644
index 0000000000..14be0d41c6
--- /dev/null
+++ b/tests/wf/indir/hello2.txt
@@ -0,0 +1 @@
+hello2
diff --git a/tests/wf/iwdr_permutations.cwl b/tests/wf/iwdr_permutations.cwl
index b838cc2ade..bf154875ef 100755
--- a/tests/wf/iwdr_permutations.cwl
+++ b/tests/wf/iwdr_permutations.cwl
@@ -1,7 +1,10 @@
#!/usr/bin/env cwl-runner
class: CommandLineTool
-cwlVersion: v1.2.0-dev4
+cwlVersion: v1.2
requirements:
+ EnvVarRequirement:
+ envDef:
+ LC_ALL: C
DockerRequirement:
dockerPull: debian
InitialWorkDirRequirement:
@@ -36,6 +39,7 @@ requirements:
- entry: $(inputs.tenth)
entryname: /my_path/tenth_writable_directory_literal
writable: true
+ - entry: $(inputs.eleventh) # array of Files
- entry: baz
entryname: /my_path/my_file_literal
inputs:
@@ -59,18 +63,24 @@ inputs:
class: Directory
basename: bar
listing: []
+ eleventh: File[]
outputs:
out:
type: Directory
outputBinding:
glob: .
+ log: stdout
+stdout: log.txt
baseCommand: [bash, -c]
arguments:
- |
- find .
- find /my_path
- find /my_other_path
+ find . | sort
+ find /my_path | sort
+ find /my_other_path | sort
echo "a" > first_writable_file
echo "b" > /my_path/third_writable_file
touch fifth_writable_directory/c
touch /my_path/seventh_writable_directory/d
+ find . | sort
+ find /my_path | sort
+ find /my_other_path | sort
diff --git a/tests/wf/iwdr_permutations_nocontainer.cwl b/tests/wf/iwdr_permutations_nocontainer.cwl
new file mode 100755
index 0000000000..0ef2e2646d
--- /dev/null
+++ b/tests/wf/iwdr_permutations_nocontainer.cwl
@@ -0,0 +1,43 @@
+#!/usr/bin/env cwl-runner
+class: CommandLineTool
+cwlVersion: v1.2
+requirements:
+ InitialWorkDirRequirement:
+ listing:
+ - entry: $(inputs.first)
+ entryname: first_writable_file
+ writable: true
+ - entry: $(inputs.second)
+ entryname: second_read_only_file
+ writable: false
+ - entry: $(inputs.fifth)
+ entryname: fifth_writable_directory
+ writable: true
+ - entry: $(inputs.sixth)
+ entryname: sixth_read_only_directory
+ writable: false
+ - entry: $(inputs.ninth)
+ entryname: nineth_writable_directory_literal
+ writable: true
+inputs:
+ first: File
+ second: File
+ fifth: Directory
+ sixth: Directory
+ ninth:
+ type: Directory
+ default:
+ class: Directory
+ basename: foo
+ listing: []
+outputs:
+ out:
+ type: Directory
+ outputBinding:
+ glob: .
+baseCommand: [bash, -c]
+arguments:
+ - |
+ find .
+ echo "a" > first_writable_file
+ touch fifth_writable_directory/c
diff --git a/tests/wf/literalfile.cwl b/tests/wf/literalfile.cwl
index 489b096bf9..6c212314e1 100644
--- a/tests/wf/literalfile.cwl
+++ b/tests/wf/literalfile.cwl
@@ -1,3 +1,4 @@
+#!/usr/bin/env cwl-runner
cwlVersion: v1.0
class: CommandLineTool
inputs:
diff --git a/tests/wf/operation/abstract-cosifer.cwl b/tests/wf/operation/abstract-cosifer.cwl
new file mode 100644
index 0000000000..49f630e913
--- /dev/null
+++ b/tests/wf/operation/abstract-cosifer.cwl
@@ -0,0 +1,25 @@
+class: Operation
+cwlVersion: v1.2
+
+requirements:
+ DockerRequirement:
+ dockerPull: 'tsenit/cosifer:b4d5af45d2fc54b6bff2a9153a8e9054e560302e'
+
+inputs:
+ data_matrix:
+ type: File
+ separator:
+ type: string?
+ doc: The separator used in the data_matrix file
+ index_col:
+ type: int?
+ gmt_filepath:
+ type: File?
+ outdir:
+ type: string?
+ samples_on_rows:
+ type: boolean?
+
+outputs:
+ resdir:
+ type: Directory
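
`Operation` (introduced in CWL v1.2) describes an abstract step: inputs, outputs, and requirements only, with no command to run, so the natural check for a file like this is validation rather than execution. A minimal sketch using cwltool's `--validate` flag; the assertion style is illustrative:

```python
import subprocess

# Validate the abstract Operation added above; it parses like any process,
# it just cannot be executed on its own.
result = subprocess.run(
    ["cwltool", "--validate", "tests/wf/operation/abstract-cosifer.cwl"],
    capture_output=True, text=True,
)
print("valid" if result.returncode == 0 else result.stderr)
```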
diff --git a/tests/wf/operation/expect_operation-single_packed.cwl b/tests/wf/operation/expect_operation-single_packed.cwl
new file mode 100644
index 0000000000..7e01f69733
--- /dev/null
+++ b/tests/wf/operation/expect_operation-single_packed.cwl
@@ -0,0 +1,154 @@
+{
+ "$graph": [
+ {
+ "class": "Operation",
+ "requirements": [
+ {
+ "dockerPull": "tsenit/cosifer:b4d5af45d2fc54b6bff2a9153a8e9054e560302e",
+ "class": "DockerRequirement"
+ }
+ ],
+ "inputs": [
+ {
+ "type": "File",
+ "id": "#abstract-cosifer.cwl/data_matrix"
+ },
+ {
+ "type": [
+ "null",
+ "File"
+ ],
+ "id": "#abstract-cosifer.cwl/gmt_filepath"
+ },
+ {
+ "type": [
+ "null",
+ "int"
+ ],
+ "id": "#abstract-cosifer.cwl/index_col"
+ },
+ {
+ "type": [
+ "null",
+ "string"
+ ],
+ "id": "#abstract-cosifer.cwl/outdir"
+ },
+ {
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "id": "#abstract-cosifer.cwl/samples_on_rows"
+ },
+ {
+ "type": [
+ "null",
+ "string"
+ ],
+ "doc": "The separator used in the data_matrix file",
+ "id": "#abstract-cosifer.cwl/separator"
+ }
+ ],
+ "outputs": [
+ {
+ "type": "Directory",
+ "id": "#abstract-cosifer.cwl/resdir"
+ }
+ ],
+ "id": "#abstract-cosifer.cwl"
+ },
+ {
+ "class": "Workflow",
+ "id": "#main",
+ "label": "abstract-cosifer-workflow",
+ "inputs": [
+ {
+ "type": "File",
+ "doc": "Gene expression data matrix",
+ "id": "#data_matrix"
+ },
+ {
+ "type": [
+ "null",
+ "File"
+ ],
+ "doc": "Optional GMT file to perform inference on multiple gene sets",
+ "id": "#gmt_filepath"
+ },
+ {
+ "type": [
+ "null",
+ "int"
+ ],
+ "doc": "Column index in the data. Defaults to None, a.k.a., no index",
+ "id": "#index_col"
+ },
+ {
+ "type": "string",
+ "doc": "Path to the output directory",
+ "id": "#outdir"
+ },
+ {
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "doc": "Flag that indicates that data contain the samples on rows. Defaults to False.",
+ "id": "#samples_on_rows"
+ },
+ {
+ "type": [
+ "null",
+ "string"
+ ],
+ "doc": "Separator for the data. Defaults to .",
+ "id": "#separator"
+ }
+ ],
+ "outputs": [
+ {
+ "type": "Directory",
+ "outputSource": "#/abstract_cosifer/resdir",
+ "id": "#resdir"
+ }
+ ],
+ "steps": [
+ {
+ "run": "#abstract-cosifer.cwl",
+ "in": [
+ {
+ "source": "#data_matrix",
+ "id": "#abstract_cosifer/data_matrix"
+ },
+ {
+ "source": "#gmt_filepath",
+ "id": "#abstract_cosifer/gmt_filepath"
+ },
+ {
+ "source": "#index_col",
+ "id": "#abstract_cosifer/index_col"
+ },
+ {
+ "source": "#outdir",
+ "id": "#abstract_cosifer/outdir"
+ },
+ {
+ "source": "#samples_on_rows",
+ "id": "#abstract_cosifer/samples_on_rows"
+ },
+ {
+ "source": "#separator",
+ "id": "#abstract_cosifer/separator"
+ }
+ ],
+ "out": [
+ "#/abstract_cosifer/resdir"
+ ],
+ "id": "#abstract_cosifer"
+ }
+ ]
+ }
+ ],
+ "cwlVersion": "v1.2"
+}
\ No newline at end of file
diff --git a/tests/wf/operation/operation-single.cwl b/tests/wf/operation/operation-single.cwl
new file mode 100644
index 0000000000..2119bf02e1
--- /dev/null
+++ b/tests/wf/operation/operation-single.cwl
@@ -0,0 +1,27 @@
+class: Workflow
+cwlVersion: v1.2
+id: abstract_cosifer_workflow
+label: abstract-cosifer-workflow
+
+inputs:
+ data_matrix: {type: File, doc: "Gene expression data matrix"}
+ gmt_filepath: {type: "File?", doc: "Optional GMT file to perform inference on multiple gene sets"}
+ index_col: {type: "int?", doc: "Column index in the data. Defaults to None, a.k.a., no index"}
+ outdir: {type: string, doc: "Path to the output directory"}
+ separator: {type: "string?", doc: "Separator for the data. Defaults to ."}
+ samples_on_rows: {type: "boolean?", doc: "Flag that indicates that data contain the samples on rows. Defaults to False."}
+
+outputs:
+ resdir: {type: Directory, outputSource: abstract_cosifer/resdir}
+
+steps:
+ abstract_cosifer:
+ run: abstract-cosifer.cwl
+ in:
+ data_matrix: data_matrix
+ separator: separator
+ index_col: index_col
+ gmt_filepath: gmt_filepath
+ outdir: outdir
+ samples_on_rows: samples_on_rows
+ out: [resdir]
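
This workflow is the source form of the `expect_operation-single_packed.cwl` fixture above, which suggests the usual pack-and-compare pattern: run `cwltool --pack` on the source and compare the JSON against the expected file. A sketch of that comparison, assuming it runs from the repository root; the exact assertion in the test suite may differ:

```python
import json
import subprocess

# Pack the source workflow into a single $graph document.
packed = subprocess.run(
    ["cwltool", "--pack", "tests/wf/operation/operation-single.cwl"],
    capture_output=True, text=True, check=True,
).stdout

with open("tests/wf/operation/expect_operation-single_packed.cwl") as fh:
    expected = json.load(fh)

# Structural comparison of the freshly packed graph against the fixture.
assert json.loads(packed) == expected
```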
diff --git a/tests/wf/packed-with-loadlisting.cwl b/tests/wf/packed-with-loadlisting.cwl
index a9dd178535..dff5c4ad9b 100644
--- a/tests/wf/packed-with-loadlisting.cwl
+++ b/tests/wf/packed-with-loadlisting.cwl
@@ -1,3 +1,4 @@
+#!/usr/bin/env cwl-runner
{
"$graph": [
{
diff --git a/tests/wf/paramref_arguments_self.cwl b/tests/wf/paramref_arguments_self.cwl
new file mode 100644
index 0000000000..9795f5dd05
--- /dev/null
+++ b/tests/wf/paramref_arguments_self.cwl
@@ -0,0 +1,41 @@
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs:
+ self_review:
+ type: File
+ outputSource: evaluate_self/self_review
+steps:
+ dump_self:
+ run:
+ class: CommandLineTool
+ baseCommand: echo
+ inputs: []
+ arguments:
+ - '{"self":$(self)}'
+ stdout: self.json
+ outputs:
+ self_json: stdout
+ in: []
+ out: [self_json]
+ evaluate_self:
+ run:
+ class: CommandLineTool
+ hints:
+ DockerRequirement:
+ dockerPull: everpeace/curl-jq
+ inputs:
+ self:
+ type: File
+ inputBinding:
+ position: 2
+ stdout: self_review.txt
+ outputs:
+ self_review: stdout
+ baseCommand: jq
+ arguments:
+ - valueFrom: '.self | type == "null"'
+ position: 1
+ in: { self: dump_self/self_json }
+ out: [ self_review ]
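
The workflow above pins down one rule: inside a tool's `arguments`, `$(self)` has no bound value, so the `dump_self` step must emit `{"self": null}` and the jq filter `.self | type == "null"` prints `true`. The same assertion in Python, run against the `self.json` capture named in the step's `stdout`:

```python
import json

with open("self.json") as fh:  # stdout capture from the dump_self step
    dumped = json.load(fh)

# Equivalent to the jq filter '.self | type == "null"'.
assert dumped["self"] is None
```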
diff --git a/tests/wf/revsort_datetime.cwl b/tests/wf/revsort_datetime.cwl
new file mode 100755
index 0000000000..812b384f5d
--- /dev/null
+++ b/tests/wf/revsort_datetime.cwl
@@ -0,0 +1,80 @@
+#!/usr/bin/env cwl-runner
+#
+# This is a two-step workflow which uses "revtool" and "sorttool", defined in separate files.
+#
+class: Workflow
+doc: "Reverse the lines in a document, then sort those lines."
+cwlVersion: v1.0
+
+# Requirements & hints specify prerequisites and extensions to the workflow.
+# In this example, DockerRequirement specifies a default Docker container
+# in which the command line tools will execute.
+hints:
+ - class: DockerRequirement
+ dockerPull: debian:8
+
+
+# The inputs array defines the structure of the input object that describes
+# the inputs to the workflow.
+#
+# The "reverse_sort" input parameter demonstrates the "default" field. If the
+# field "reverse_sort" is not provided in the input object, the default value will
+# be used.
+inputs:
+ workflow_input:
+ type: File
+ doc: "The input file to be processed."
+ format: iana:text/plain
+ default:
+ class: File
+ location: hello.txt
+ reverse_sort:
+ type: boolean
+ default: true
+ doc: "If true, reverse (decending) sort"
+
+# The "outputs" array defines the structure of the output object that describes
+# the outputs of the workflow.
+#
+# Each output field must be connected to the output of one of the workflow
+# steps using the "outputSource" field. Here, the parameter "sorted_output" of the
+# workflow comes from the "sorted_output" output of the "sorted" step.
+outputs:
+ sorted_output:
+ type: File
+ outputSource: sorted/sorted_output
+ doc: "The output with the lines reversed and sorted."
+
+# The "steps" array lists the executable steps that make up the workflow.
+# The tool to execute each step is listed in the "run" field.
+#
+# In the first step, the "in" field of the step connects the upstream
+# parameter "workflow_input" of the workflow to the input parameter of the tool
+# "revtool_input"
+#
+# In the second step, the "in" field of the step connects the output
+# parameter "revtool_output" from the first step to the input parameter of the
+# tool "sorted_input".
+steps:
+ rev:
+ in:
+ revtool_input: workflow_input
+ out: [revtool_output]
+ run: revtool.cwl
+
+ sorted:
+ in:
+ sorted_input: rev/revtool_output
+ reverse: reverse_sort
+ out: [sorted_output]
+ run: sorttool.cwl
+
+$namespaces:
+ iana: https://www.iana.org/assignments/media-types/
+ s: http://schema.org/
+
+$schemas:
+ - https://schema.org/version/latest/schemaorg-current-https.rdf
+ - empty2.ttl
+
+s:dateCreated: 2020-10-08
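
Stripped of the CWL wiring, the two steps compute something simple: `rev` reverses the characters of each line, then `sort` orders the lines, descending when `reverse_sort` is true. A plain-Python model of that data flow (illustrative only):

```python
def revsort(lines, reverse_sort=True):
    """Reverse each line's characters (the rev step), then sort the lines (the sort step)."""
    reversed_lines = [line[::-1] for line in lines]
    return sorted(reversed_lines, reverse=reverse_sort)

print(revsort(["abc", "abd"]))  # ['dba', 'cba'] with the default descending sort
```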
diff --git a/tests/wf/scatter2.cwl b/tests/wf/scatter2.cwl
index 79c3854d8e..5c829102b4 100644
--- a/tests/wf/scatter2.cwl
+++ b/tests/wf/scatter2.cwl
@@ -1,3 +1,4 @@
+#!/usr/bin/env cwl-runner
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
diff --git a/tests/wf/scatter2_subwf.cwl b/tests/wf/scatter2_subwf.cwl
index 2c41367fde..0e2b4b129d 100644
--- a/tests/wf/scatter2_subwf.cwl
+++ b/tests/wf/scatter2_subwf.cwl
@@ -1,3 +1,4 @@
+#!/usr/bin/env cwl-runner
# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
@@ -71,5 +72,8 @@
]
}
],
- "cwlVersion": "v1.0"
+ "cwlVersion": "v1.0",
+ "$namespaces": {
+ "arv": "http://arvados.org/cwl#"
+ }
}
diff --git a/tests/wf/schemadef-bug-1473.cwl b/tests/wf/schemadef-bug-1473.cwl
new file mode 100644
index 0000000000..47744032f4
--- /dev/null
+++ b/tests/wf/schemadef-bug-1473.cwl
@@ -0,0 +1,1459 @@
+{
+ "$graph": [
+ {
+ "class": "ExpressionTool",
+ "id": "#flatten-array-fastq-list__1.0.0.cwl",
+ "label": "flatten-array-fastq-list-schema v(1.0.0)",
+ "doc": "Documentation for flatten-array-fastq-list-schema v1.0.0\n",
+ "requirements": [
+ {
+ "class": "InlineJavascriptRequirement"
+ },
+ {
+ "types": [
+ {
+ "type": "record",
+ "name": "#fastq-list-row__1.0.0.yaml/fastq-list-row",
+ "fields": [
+ {
+ "label": "lane",
+ "doc": "The lane that the sample was run on\n",
+ "type": "int",
+ "name": "#fastq-list-row__1.0.0.yaml/fastq-list-row/lane"
+ },
+ {
+ "label": "read 1",
+ "doc": "The path to R1 of a sample\n",
+ "type": "File",
+ "streamable": true,
+ "name": "#fastq-list-row__1.0.0.yaml/fastq-list-row/read_1"
+ },
+ {
+ "label": "read 2",
+ "doc": "The path to R2 of a sample\n",
+ "type": [
+ "null",
+ "File"
+ ],
+ "streamable": true,
+ "name": "#fastq-list-row__1.0.0.yaml/fastq-list-row/read_2"
+ },
+ {
+ "label": "rgid",
+ "doc": "The read-group id of the sample.\nOften an index\n",
+ "type": "string",
+ "name": "#fastq-list-row__1.0.0.yaml/fastq-list-row/rgid"
+ },
+ {
+ "label": "rglb",
+ "doc": "The read-group library of the sample.\n",
+ "type": "string",
+ "name": "#fastq-list-row__1.0.0.yaml/fastq-list-row/rglb"
+ },
+ {
+ "label": "rgsm",
+ "doc": "The read-group sample name\n",
+ "type": "string",
+ "name": "#fastq-list-row__1.0.0.yaml/fastq-list-row/rgsm"
+ }
+ ],
+ "id": "#fastq-list-row__1.0.0.yaml"
+ }
+ ],
+ "class": "SchemaDefRequirement"
+ }
+ ],
+ "inputs": [
+ {
+ "label": "two dim array",
+ "doc": "two dimensional array with fastq list row\n",
+ "type": {
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": "#fastq-list-row__1.0.0.yaml/fastq-list-row"
+ }
+ },
+ "inputBinding": {
+ "loadContents": true
+ },
+ "id": "#flatten-array-fastq-list__1.0.0.cwl/arrayTwoDim"
+ }
+ ],
+ "outputs": [
+ {
+ "label": "one dim array",
+ "doc": "one dimensional array\n",
+ "type": {
+ "type": "array",
+ "items": "#fastq-list-row__1.0.0.yaml/fastq-list-row"
+ },
+ "id": "#flatten-array-fastq-list__1.0.0.cwl/array1d"
+ }
+ ],
+ "expression": "${\n var newArray= [];\n for (var i = 0; i < inputs.arrayTwoDim.length; i++) {\n for (var k = 0; k < inputs.arrayTwoDim[i].length; k++) {\n newArray.push((inputs.arrayTwoDim[i])[k]);\n }\n }\n return { 'array1d' : newArray }\n}\n",
+ "https://schema.org/author": {
+ "class": "https://schema.org/Person",
+ "https://schema.org/name": "Sehrish Kanwal",
+ "https://schema.org/email": "sehrish.kanwal@umccr.org"
+ },
+ "$namespaces": {
+ "s": "https://schema.org/"
+ }
+ },
+ {
+ "class": "ExpressionTool",
+ "id": "#get-samplesheet-midfix-regex__1.0.0.cwl",
+ "label": "get-samplesheet-midfix-regex v(1.0.0)",
+ "doc": "Documentation for get-samplesheet-midfix-regex v1.0.0\n",
+ "requirements": [
+ {
+ "expressionLib": [
+ "var get_batch_name_from_samplesheet = function(samplesheet_basename) { /* Get everything between SampleSheet and csv https://regex101.com/r/KlF7LW/1 */ var samplesheet_regex = /SampleSheet\\.(\\S+)\\.csv/g; return samplesheet_regex.exec(samplesheet_basename)[1]; }",
+ "var get_batch_names = function(file_objs) { /* For each file object extract the midfix */\n/* Initialise batch names */ var batch_names = [];\nfor (var i = 0; i < file_objs.length; i++){ /* But of that basename, get the midfix */ batch_names.push(get_batch_name_from_samplesheet(file_objs[i].basename)); }\nreturn batch_names; }"
+ ],
+ "class": "InlineJavascriptRequirement"
+ }
+ ],
+ "inputs": [
+ {
+ "label": "sample sheets",
+ "doc": "Input samplesheet to extract midfix from\n",
+ "type": {
+ "type": "array",
+ "items": "File"
+ },
+ "id": "#get-samplesheet-midfix-regex__1.0.0.cwl/samplesheets"
+ }
+ ],
+ "outputs": [
+ {
+ "label": "output batch names",
+ "doc": "List of output batch names\n",
+ "type": {
+ "type": "array",
+ "items": "string"
+ },
+ "id": "#get-samplesheet-midfix-regex__1.0.0.cwl/batch_names"
+ }
+ ],
+ "expression": "${\n return {\"batch_names\": get_batch_names(inputs.samplesheets)};\n}",
+ "https://schema.org/author": {
+ "class": "https://schema.org/Person",
+ "https://schema.org/name": "Sehrish Kanwal",
+ "https://schema.org/email": "sehrish.kanwal@umccr.org"
+ }
+ },
+ {
+ "class": "CommandLineTool",
+ "id": "#bclConvert__3.7.5.cwl",
+ "label": "bclConvert v(3.7.5)",
+ "doc": "Runs the BCL Convert application off standard architechture\n",
+ "hints": [
+ {
+ "dockerPull": "umccr/bcl-convert:3.7.5",
+ "class": "DockerRequirement"
+ },
+ {
+ "coresMin": 72,
+ "ramMin": 64000,
+ "class": "ResourceRequirement",
+ "http://platform.illumina.com/rdf/ica/resources": {
+ "type": "standardHiCpu",
+ "size": "large"
+ }
+ }
+ ],
+ "requirements": [
+ {
+ "listing": [
+ {
+ "entryname": "scripts/run_bclconvert.sh",
+ "entry": "#!/usr/bin/bash\n\n# Fail on non-zero exit code\nset -euo pipefail\n\n# Run bcl-convert with input parameters\neval bcl-convert '\"\\${@}\"'\n\n# Delete undetermined indices\nif [[ \"$(inputs.delete_undetermined_indices)\" == \"true\" ]]; then\n echo \"Deleting undetermined indices\" 1>&2\n find \"$(inputs.output_directory)\" -mindepth 1 -maxdepth 1 -name 'Undetermined_S0_*' -exec rm {} \\\\;\nfi\n"
+ }
+ ],
+ "class": "InitialWorkDirRequirement"
+ },
+ {
+ "class": "InlineJavascriptRequirement"
+ },
+ {
+ "types": [
+ {
+ "$import": "#fastq-list-row__1.0.0.yaml"
+ }
+ ],
+ "class": "SchemaDefRequirement"
+ }
+ ],
+ "baseCommand": [
+ "bash"
+ ],
+ "arguments": [
+ {
+ "position": -1,
+ "valueFrom": "scripts/run_bclconvert.sh"
+ }
+ ],
+ "inputs": [
+ {
+ "label": "bcl conversion threads",
+ "doc": "Specifies number of threads used for conversion per tile.\nMust be between 1 and available hardware threads,\ninclusive.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "inputBinding": {
+ "prefix": "--bcl-conversion-threads"
+ },
+ "id": "#bclConvert__3.7.5.cwl/bcl_conversion_threads"
+ },
+ {
+ "label": "bcl input directory",
+ "doc": "A main command-line option that indicates the path to the run\nfolder directory\n",
+ "type": "Directory",
+ "inputBinding": {
+ "prefix": "--bcl-input-directory"
+ },
+ "id": "#bclConvert__3.7.5.cwl/bcl_input_directory"
+ },
+ {
+ "label": "bcl num compression threads",
+ "doc": "Specifies number of CPU threads used for compression of\noutput FASTQ files. Must be between 1 and available\nhardware threads, inclusive.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "inputBinding": {
+ "prefix": "--bcl-num-compression-threads"
+ },
+ "id": "#bclConvert__3.7.5.cwl/bcl_num_compression_threads"
+ },
+ {
+ "label": "bcl num decompression threads",
+ "doc": "Specifies number of CPU threads used for decompression\nof input base call files. Must be between 1 and available\nhardware threads, inclusive.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "inputBinding": {
+ "prefix": "--bcl-num-decompression-threads"
+ },
+ "id": "#bclConvert__3.7.5.cwl/bcl_num_decompression_threads"
+ },
+ {
+ "label": "bcl num parallel tiles",
+ "doc": "Specifies number of tiles being converted to FASTQ files in\nparallel. Must be between 1 and available hardware threads,\ninclusive.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "inputBinding": {
+ "prefix": "--bcl-num-parallel-tiles"
+ },
+ "id": "#bclConvert__3.7.5.cwl/bcl_num_parallel_tiles"
+ },
+ {
+ "label": "convert only one lane",
+ "doc": "Convert only the specified lane number. The value must\nbe less than or equal to the number of lanes specified in the\nRunInfo.xml. Must be a single integer value.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "inputBinding": {
+ "prefix": "--bcl-only-lane"
+ },
+ "id": "#bclConvert__3.7.5.cwl/bcl_only_lane"
+ },
+ {
+ "label": "bcl sample project subdirectories",
+ "doc": "true \u2014 Allows creation of Sample_Project subdirectories\nas specified in the sample sheet. This option must be set to true for\nthe Sample_Project column in the data section to be used.\nDefault set to false.\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "inputBinding": {
+ "prefix": "--bcl-sampleproject-subdirectories",
+ "valueFrom": "$(self.toString())"
+ },
+ "id": "#bclConvert__3.7.5.cwl/bcl_sampleproject_subdirectories"
+ },
+ {
+ "label": "delete undetermined indices",
+ "doc": "Delete undetermined indices on completion of the run\nDefault: false\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "id": "#bclConvert__3.7.5.cwl/delete_undetermined_indices"
+ },
+ {
+ "label": "first tile only",
+ "doc": "true \u2014 Only process the first tile of the first swath of the\ntop surface of each lane specified in the sample sheet.\nfalse \u2014 Process all tiles in each lane, as specified in the sample\nsheet. Default is false\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "inputBinding": {
+ "prefix": "--first-tile-only",
+ "valueFrom": "$(self.toString())"
+ },
+ "id": "#bclConvert__3.7.5.cwl/first_tile_only"
+ },
+ {
+ "label": "force",
+ "doc": "Allow for the directory specified by the --output-directory\noption to already exist. Default is false\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "inputBinding": {
+ "prefix": "--force"
+ },
+ "id": "#bclConvert__3.7.5.cwl/force"
+ },
+ {
+ "label": "output directory",
+ "doc": "A required command-line option that indicates the path to\ndemultuplexed fastq output. The directory must not exist, unless -f,\nforce is specified\n",
+ "type": "string",
+ "inputBinding": {
+ "prefix": "--output-directory"
+ },
+ "id": "#bclConvert__3.7.5.cwl/output_directory"
+ },
+ {
+ "label": "sample sheet",
+ "doc": "Indicates the path to the sample sheet to specify the\nsample sheet location and name, if different from the default.\n",
+ "type": [
+ "null",
+ "File"
+ ],
+ "inputBinding": {
+ "prefix": "--sample-sheet"
+ },
+ "id": "#bclConvert__3.7.5.cwl/samplesheet"
+ },
+ {
+ "label": "shared thread odirect output",
+ "doc": "Uses experimental shared-thread file output code, which\nrequires O_DIRECT mode. Must be true or false.\nThis file output method is optimized for sample counts\ngreater than 100,000. It is not recommended for lower\nsample counts or using a distributed file system target such\nas GPFS or Lustre. Default is false\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "inputBinding": {
+ "prefix": "--shared-thread-odirect-output"
+ },
+ "id": "#bclConvert__3.7.5.cwl/shared_thread_odirect_output"
+ },
+ {
+ "label": "strict mode",
+ "doc": "true \u2014 Abort the program if any filter, locs, bcl, or bci lane\nfiles are missing or corrupt.\nfalse \u2014 Continue processing if any filter, locs, bcl, or bci lane files\nare missing. Return a warning message for each missing or corrupt\nfile.\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "inputBinding": {
+ "prefix": "--strict-mode",
+ "valueFrom": "$(self.toString())"
+ },
+ "id": "#bclConvert__3.7.5.cwl/strict_mode"
+ }
+ ],
+ "outputs": [
+ {
+ "label": "bcl convert directory output",
+ "doc": "Output directory containing the fastq files, reports and stats\n",
+ "type": "Directory",
+ "outputBinding": {
+ "glob": "$(inputs.output_directory)"
+ },
+ "id": "#bclConvert__3.7.5.cwl/bcl_convert_directory_output"
+ },
+ {
+ "label": "fastq list rows",
+ "doc": "This schema contains the following inputs:\n* rgid: The id of the sample\n* rgsm: The name of the sample\n* rglb: The library of the sample\n* lane: The lane of the sample\n* read_1: The read 1 File of the sample\n* read_2: The read 2 File of the sample (optional)\n",
+ "type": {
+ "type": "array",
+ "items": "#fastq-list-row__1.0.0.yaml/fastq-list-row"
+ },
+ "outputBinding": {
+ "glob": "$(inputs.output_directory)/Reports/fastq_list.csv",
+ "loadContents": true,
+ "outputEval": "${\n /*\n Load inputs initialise output variables\n */\n var output_array = [];\n var lines = self[0].contents.split(\"\\n\")\n\n /*\n Generate output object by iterating through fastq_list csv\n */\n for (var i=0; i < lines.length - 1; i++){\n /*\n First line is a header row skip it\n */\n if (i === 0){\n continue;\n }\n\n /*\n Split row and collect corresponding file paths\n */\n var rgid = lines[i].split(\",\")[0];\n var rgsm = lines[i].split(\",\")[1];\n var rglb = lines[i].split(\",\")[2];\n var lane = parseInt(lines[i].split(\",\")[3]);\n var read_1_path = lines[i].split(\",\")[4];\n var read_2_path = lines[i].split(\",\")[5];\n\n /*\n Initialise the output row as a dict\n */\n var output_fastq_list_row = {\n \"rgid\": rgid,\n \"rglb\": rglb,\n \"rgsm\": rgsm,\n \"lane\": lane,\n \"read_1\": {\n \"class\": \"File\",\n \"path\": read_1_path\n },\n }\n\n\n if (read_2_path !== \"\"){\n /*\n read 2 path exists\n */\n output_fastq_list_row[\"read_2\"] = {\n \"class\": \"File\",\n \"path\": read_2_path\n }\n }\n\n /*\n Append object to output array\n */\n output_array.push(output_fastq_list_row);\n }\n return output_array;\n}\n"
+ },
+ "id": "#bclConvert__3.7.5.cwl/fastq_list_rows"
+ }
+ ],
+ "successCodes": [
+ 0
+ ],
+ "https://schema.org/author": {
+ "class": "https://schema.org/Person",
+ "https://schema.org/name": "Sehrish Kanwal",
+ "https://schema.org/email": "sehrish.kanwal@umccr.org"
+ }
+ },
+ {
+ "class": "CommandLineTool",
+ "id": "#custom-samplesheet-split-by-settings__1.0.0.cwl",
+ "label": "custom-samplesheet-split-by-settings v(1.0.0)",
+ "doc": "Use before running bcl-convert workflow to ensure that the bclConvert workflow can run in parallel.\nSamples will be split into separate samplesheets based on their cycles specification\n",
+ "hints": [
+ {
+ "dockerPull": "umccr/alpine_pandas:latest-cwl",
+ "class": "DockerRequirement"
+ },
+ {
+ "class": "ResourceRequirement",
+ "http://platform.illumina.com/rdf/ica/resources": {
+ "tier": "standard",
+ "type": "standard",
+ "size": "small",
+ "coresMin": 2,
+ "ramMin": 4000
+ }
+ }
+ ],
+ "requirements": [
+ {
+ "listing": [
+ {
+ "entryname": "samplesheet-by-settings.py",
+ "entry": "#!/usr/bin/env python3\n\n\"\"\"\nTake in a samplesheet,\nLook through headers, rename as necessary\nLook through samples, update settings logic as necessary\nSplit samplesheet out into separate settings files\nWrite to separate files\nIf --samplesheet-format is set to v2 then:\n* rename Settings.Adapter to Settings.AdapterRead1\n* Reduce Data to the columns Lane, Sample_ID, index, index2, Sample_Project\n* Add FileFormatVersion=2 to Header\n* Convert Reads from list to dict with Read1Cycles and Read2Cycles as keys\n\"\"\"\n\n# Imports\nimport re\nimport os\nimport pandas as pd\nimport numpy as np\nimport logging\nimport argparse\nfrom pathlib import Path\nimport sys\nfrom copy import deepcopy\nimport json\n\n# Set logging level\nlogging.basicConfig(level=logging.DEBUG)\n\n# Globals\nSAMPLESHEET_HEADER_REGEX = r\"^\\[(\\S+)\\](,+)?\" # https://regex101.com/r/5nbe9I/1\nV2_SAMPLESHEET_HEADER_VALUES = {\"Data\": \"BCLConvert_Data\",\n \"Settings\": \"BCLConvert_Settings\"}\nV2_FILE_FORMAT_VERSION = \"2\"\nV2_DEFAULT_INSTRUMENT_TYPE = \"NovaSeq 6000\"\n\n\ndef get_args():\n \"\"\"\n Get arguments for the command\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Create samplesheets based on settings inputs.\"\n \"Expects a v1 samplesheet as input and settings by samples\"\n \"as inputs through separated jsonised strings / arrays\")\n\n # Arguments\n parser.add_argument(\"--samplesheet-csv\", required=True,\n help=\"Path to v1 samplesheet csv\")\n parser.add_argument(\"--out-dir\", required=False,\n help=\"Output directory for samplesheets, set to cwd if not specified\")\n parser.add_argument(\"--settings-by-samples\", action=\"append\", nargs='*', required=False,\n default=[],\n help=\"Settings logic for each sample\")\n parser.add_argument(\"--ignore-missing-samples\", required=False,\n default=False, action=\"store_true\",\n help=\"If not set, error if samples in the samplesheet are not present in --settings-by-samples arg\")\n parser.add_argument(\"--samplesheet-format\", required=False,\n choices=[\"v1\", \"v2\"], default=\"v1\",\n help=\"Type of samplesheet we wish to output\")\n\n return parser.parse_args()\n\n\ndef set_args(args):\n \"\"\"\n Convert --settings-by-samples to dict\n :return:\n \"\"\"\n\n # Get user args\n samplesheet_csv_arg = getattr(args, \"samplesheet_csv\", None)\n outdir_arg = getattr(args, \"out_dir\", None)\n settings_by_samples_arg = getattr(args, \"settings_by_samples\", [])\n\n # Convert samplesheet csv to path\n samplesheet_csv_path = Path(samplesheet_csv_arg)\n # Check its a file\n if not samplesheet_csv_path.is_file():\n logging.error(\"Could not find file {}\".format(samplesheet_csv_path))\n sys.exit(1)\n # Set attribute as Path object\n setattr(args, \"samplesheet_csv\", samplesheet_csv_path)\n\n # Checking the output path\n if outdir_arg is None:\n outdir_arg = os.getcwd()\n outdir_path = Path(outdir_arg)\n if not outdir_path.parent.is_dir():\n logging.error(\"Could not create --out-dir, make sure parents exist. 
Exiting\")\n sys.exit(1)\n elif not outdir_path.is_dir():\n outdir_path.mkdir(parents=False)\n setattr(args, \"out_dir\", outdir_path)\n\n # Load json lists\n settings_by_samples_list = []\n for settings_by_samples in settings_by_samples_arg:\n settings_by_samples_list.append(json.loads(settings_by_samples[0]))\n\n # Set attr as dicts grouped by each batch_name\n settings_by_batch_names = {}\n for settings_by_samples in settings_by_samples_list:\n # Initialise batch name\n batch_name_key = settings_by_samples.get(\"batch_name\")\n settings_by_batch_names[batch_name_key] = {}\n # Add in sample ids\n settings_by_batch_names[batch_name_key][\"samples\"] = settings_by_samples.get(\"samples\")\n # Add in settings\n settings_by_batch_names[batch_name_key][\"settings\"] = settings_by_samples.get(\"settings\")\n\n # Write attributes back to args dict\n setattr(args, \"settings_by_batch_names\", settings_by_batch_names)\n\n # Return args\n return args\n\n\ndef read_samplesheet_csv(samplesheet_csv_path):\n \"\"\"\n Read the samplesheet like a dodgy INI parser\n :param samplesheet_csv_path:\n :return:\n \"\"\"\n with open(samplesheet_csv_path, \"r\") as samplesheet_csv_h:\n # Read samplesheet in\n sample_sheet_sections = {}\n current_section = None\n current_section_item_list = []\n header_match_regex = re.compile(SAMPLESHEET_HEADER_REGEX)\n\n for line in samplesheet_csv_h.readlines():\n # Check if blank line\n if line.strip().rstrip(\",\") == \"\":\n continue\n # Check if the current line is a header\n header_match_obj = header_match_regex.match(line.strip())\n if header_match_obj is not None and current_section is None:\n # First line, don't need to write out previous section to obj\n # Set current section to first group\n current_section = header_match_obj.group(1)\n current_section_item_list = []\n elif header_match_obj is not None and current_section is not None:\n # A header further down, write out previous section and then reset sections\n sample_sheet_sections[current_section] = current_section_item_list\n # Now reset sections\n current_section = header_match_obj.group(1)\n current_section_item_list = []\n # Make sure the first line is a section\n elif current_section is None and header_match_obj is None:\n logging.error(\"Top line of csv was not a section header. 
Exiting\")\n sys.exit(1)\n else: # We're in a section\n if not current_section == \"Data\":\n # Strip trailing slashes from line\n current_section_item_list.append(line.strip().rstrip(\",\"))\n else:\n # Don't strip trailing slashes from line\n current_section_item_list.append(line.strip())\n\n # Write out the last section\n sample_sheet_sections[current_section] = current_section_item_list\n\n return sample_sheet_sections\n\n\ndef configure_samplesheet_obj(sample_sheet_obj):\n \"\"\"\n Each section of the samplesheet obj is in a ',' delimiter ini format\n Except for [Reads] which is just a list\n And [Data] which is a dataframe\n :param sample_sheet_obj:\n :return:\n \"\"\"\n\n for section_name, section_str_list in sample_sheet_obj.items():\n if section_name == \"Data\":\n # Convert to dataframe\n sample_sheet_obj[section_name] = pd.DataFrame(columns=section_str_list[0].split(\",\"),\n data=[row.split(\",\") for row in\n section_str_list[1:]])\n elif section_name == \"Reads\":\n # Keep as a list\n continue\n else:\n # Convert to dict\n sample_sheet_obj[section_name] = {line.split(\",\", 1)[0]: line.split(\",\", 1)[-1]\n for line in section_str_list\n if not line.split(\",\", 1)[0] == \"\"}\n # Check all values are non empty\n for key, value in sample_sheet_obj[section_name].items():\n if value == \"\" or value.startswith(\",\"):\n logging.error(\"Could not parse key \\\"{}\\\" in section \\\"{}\\\"\".format(key, section_name))\n logging.error(\"Value retrieved was \\\"{}\\\"\".format(value))\n sys.exit(1)\n\n return sample_sheet_obj\n\n\ndef lower_under_score_to_camel_case(string):\n \"\"\"\n Quick script to update a string from \"this_type\" to \"ThisType\"\n Necessary for the bclconvert settings to be in camel case\n Parameters\n ----------\n string\n Returns\n -------\n \"\"\"\n\n camel_case_string_list = []\n words_list = string.split(\"_\")\n\n for word in words_list:\n camel_case_string_list.append(word.title())\n\n return \"\".join(map(str, camel_case_string_list))\n\n\ndef strip_ns_from_indexes(samplesheetobj_data_df):\n \"\"\"\n Strip Ns from the end of the index and index2 headers\n :param samplesheetobj_data_df:\n :return:\n \"\"\"\n\n samplesheetobj_data_df['index'] = samplesheetobj_data_df['index'].apply(lambda x: x.rstrip(\"N\"))\n if 'index2' in samplesheetobj_data_df.columns.tolist():\n samplesheetobj_data_df['index2'] = samplesheetobj_data_df['index2'].apply(lambda x: x.rstrip(\"N\"))\n samplesheetobj_data_df['index2'] = samplesheetobj_data_df['index2'].replace(\"\", np.nan)\n\n return samplesheetobj_data_df\n\n\ndef rename_settings_and_data_headers_v2(samplesheet_obj):\n \"\"\"\n :return:\n \"\"\"\n\n for v1_key, v2_key in V2_SAMPLESHEET_HEADER_VALUES.items():\n if v1_key in samplesheet_obj.keys():\n samplesheet_obj[v2_key] = samplesheet_obj.pop(v1_key)\n\n return samplesheet_obj\n\n\ndef add_file_format_version_v2(samplesheet_header):\n \"\"\"\n Add FileFormatVersion key pair to samplesheet header for v2 samplesheet\n :param samplesheet_header:\n :return:\n \"\"\"\n\n samplesheet_header['FileFormatVersion'] = V2_FILE_FORMAT_VERSION\n\n return samplesheet_header\n\n\ndef set_instrument_type(samplesheet_header):\n \"\"\"\n Fix InstrumentType if it's not specified\n :param samplesheet_header:\n :return:\n \"\"\"\n\n if \"InstrumentType\" not in samplesheet_header.keys():\n samplesheet_header[\"InstrumentType\"] = V2_DEFAULT_INSTRUMENT_TYPE\n\n return samplesheet_header\n\n\ndef update_settings_v2(samplesheet_settings):\n \"\"\"\n Convert Adapter To AdapterRead1 for v2 
samplesheet\n :param samplesheet_settings:\n :return:\n \"\"\"\n\n # Rename Adapter to AdapterRead1\n if \"Adapter\" in samplesheet_settings.keys() and not \"AdapterRead1\" in samplesheet_settings.keys():\n samplesheet_settings[\"AdapterRead1\"] = samplesheet_settings.pop(\"Adapter\")\n elif \"Adapter\" in samplesheet_settings.keys() and \"AdapterRead1\" in samplesheet_settings.keys():\n _ = samplesheet_settings.pop(\"Adapter\")\n\n # Drop any settings where settings are \"\" - needed for \"AdapterRead2\"\n samplesheet_settings = {\n key: val\n for key, val in samplesheet_settings.items()\n if not val == \"\"\n }\n return samplesheet_settings\n\n\ndef truncate_data_columns_v2(samplesheet_data_df):\n \"\"\"\n Truncate data columns to v2 columns\n Lane,Sample_ID,index,index2,Sample_Project\n :param samplesheet_data_df:\n :return:\n \"\"\"\n\n v2_columns = [\"Lane\", \"Sample_ID\", \"index\", \"index2\", \"Sample_Project\"]\n samplesheet_data_df = samplesheet_data_df.filter(items=v2_columns)\n\n return samplesheet_data_df\n\n\ndef convert_reads_from_list_to_dict_v2(samplesheet_reads):\n \"\"\"\n Convert Reads from a list to a dict format\n :param samplesheet_reads:\n :return:\n \"\"\"\n\n samplesheet_reads = {\"Read{}Cycles\".format(i + 1): rnum for i, rnum in enumerate(samplesheet_reads)}\n\n return samplesheet_reads\n\n\ndef convert_samplesheet_to_v2(samplesheet_obj):\n \"\"\"\n Runs through necessary steps to convert object to v2 samplesheet\n :param samplesheet_obj:\n :return:\n \"\"\"\n samplesheet_obj[\"Header\"] = add_file_format_version_v2(samplesheet_obj[\"Header\"])\n samplesheet_obj[\"Header\"] = set_instrument_type(samplesheet_obj[\"Header\"])\n samplesheet_obj[\"Settings\"] = update_settings_v2(samplesheet_obj[\"Settings\"])\n samplesheet_obj[\"Data\"] = truncate_data_columns_v2(samplesheet_obj[\"Data\"])\n samplesheet_obj[\"Reads\"] = convert_reads_from_list_to_dict_v2(samplesheet_obj[\"Reads\"])\n samplesheet_obj = rename_settings_and_data_headers_v2(samplesheet_obj)\n\n return samplesheet_obj\n\n\ndef check_samples(samplesheet_obj, settings_by_samples, ignore_missing_samples=False):\n \"\"\"\n If settings_by_samples is defined, ensure that each sample is present\n \"\"\"\n all_samples_in_samplesheet = samplesheet_obj[\"Data\"][\"Sample_ID\"].tolist()\n popped_samples = []\n\n if len(settings_by_samples.keys()) == 0:\n # No problem as we're not splitting samples by sample sheet\n return\n\n for batch_name, batch_settings_and_samples_dict in settings_by_samples.items():\n samples = batch_settings_and_samples_dict.get(\"samples\")\n for sample in samples:\n if sample in popped_samples:\n logging.error(\"Sample \\\"{}\\\" registered multiple times\".format(sample))\n sys.exit(1)\n elif sample not in all_samples_in_samplesheet:\n logging.error(\"Could not find sample \\\"{}\\\"\".format(sample))\n sys.exit(1)\n else:\n popped_samples.append(all_samples_in_samplesheet.pop(all_samples_in_samplesheet.index(sample)))\n\n if ignore_missing_samples:\n # No issue\n return\n\n if not len(all_samples_in_samplesheet) == 0:\n logging.error(\"The following samples have no associated batch name: {}\".format(\n \", \".join(map(str, [\"\\\"{}\\\"\".format(sample) for sample in all_samples_in_samplesheet]))\n ))\n sys.exit(1)\n\n\ndef write_out_samplesheets(samplesheet_obj, out_dir, settings_by_samples,\n is_v2=False):\n \"\"\"\n Write out samplesheets to each csv file\n :return:\n \"\"\"\n\n if not len(list(settings_by_samples.keys())) == 0:\n for batch_name, batch_settings_and_samples_dict in 
settings_by_samples.items():\n # Get settings\n settings = batch_settings_and_samples_dict.get(\"settings\")\n samples = batch_settings_and_samples_dict.get(\"samples\")\n # Duplicate samplesheet_obj\n samplesheet_obj_by_settings_copy = deepcopy(samplesheet_obj)\n # Convert df to csv string\n samplesheet_obj_by_settings_copy[\"Data\"] = samplesheet_obj_by_settings_copy[\"Data\"].query(\"Sample_ID in @samples\")\n # Update settings\n for setting_key, setting_val in settings.items():\n \"\"\"\n Update settings\n \"\"\"\n if setting_val is None:\n # Don't add None var\n continue\n # Update setting value for boolean types\n if type(setting_val) == bool:\n setting_val = 1 if setting_val else 0\n # Then assign to settings dict\n samplesheet_obj_by_settings_copy[\"Settings\"][\n lower_under_score_to_camel_case(setting_key)] = setting_val\n\n # Write out config\n write_samplesheet(samplesheet_obj=samplesheet_obj_by_settings_copy,\n output_file=out_dir / \"SampleSheet.{}.csv\".format(batch_name),\n is_v2=is_v2)\n\n else: # No splitting required\n write_samplesheet(samplesheet_obj=samplesheet_obj,\n output_file=out_dir / \"SampleSheet.csv\",\n is_v2=is_v2)\n\n\ndef write_samplesheet(samplesheet_obj, output_file, is_v2):\n \"\"\"\n Write out the samplesheet object and a given file\n :param samplesheet_obj:\n :param output_file:\n :param is_v2\n :return:\n \"\"\"\n\n # Rename samplesheet at the last possible moment\n if is_v2:\n # Drop index2 if all are \"N/A\"\n if 'index2' in samplesheet_obj[\"Data\"].columns.tolist() and \\\n samplesheet_obj[\"Data\"][\"index2\"].isna().all():\n samplesheet_obj[\"Data\"] = samplesheet_obj[\"Data\"].drop(columns=\"index2\")\n\n samplesheet_obj = convert_samplesheet_to_v2(samplesheet_obj)\n\n # Write the output file\n with open(output_file, 'w') as samplesheet_h:\n for section, section_values in samplesheet_obj.items():\n # Write out the section header\n samplesheet_h.write(\"[{}]\\n\".format(section))\n # Write out values\n if type(section_values) == list: # [Reads] for v1 samplesheets\n # Write out each item in a new line\n samplesheet_h.write(\"\\n\".join(section_values))\n elif type(section_values) == dict:\n samplesheet_h.write(\"\\n\".join(map(str, [\"{},{}\".format(key, value)\n for key, value in section_values.items()])))\n elif type(section_values) == pd.DataFrame:\n section_values.to_csv(samplesheet_h, index=False, header=True, sep=\",\")\n # Add new line before the next section\n samplesheet_h.write(\"\\n\\n\")\n\n\ndef main():\n # Get args\n args = get_args()\n\n # Check / set args\n logging.info(\"Checking args\")\n args = set_args(args=args)\n\n # Read config\n logging.info(\"Reading samplesheet\")\n samplesheet_obj = read_samplesheet_csv(samplesheet_csv_path=args.samplesheet_csv)\n\n # Configure samplesheet\n logging.info(\"Configuring samplesheet\")\n samplesheet_obj = configure_samplesheet_obj(samplesheet_obj)\n\n # Check missing samples\n logging.info(\"Checking missing samples\")\n check_samples(samplesheet_obj=samplesheet_obj,\n settings_by_samples=getattr(args, \"settings_by_batch_names\", {}),\n ignore_missing_samples=args.ignore_missing_samples)\n\n # Strip Ns from samplesheet indexes\n logging.info(\"Stripping Ns from indexes\")\n samplesheet_obj[\"Data\"] = strip_ns_from_indexes(samplesheet_obj[\"Data\"])\n\n # Write out samplesheets\n logging.info(\"Writing out samplesheets\")\n write_out_samplesheets(samplesheet_obj=samplesheet_obj,\n out_dir=args.out_dir,\n settings_by_samples=getattr(args, \"settings_by_batch_names\", {}),\n is_v2=True if 
args.samplesheet_format == \"v2\" else False)\n\n# Run main script\nif __name__ == \"__main__\":\n main()\n"
+ }
+ ],
+ "class": "InitialWorkDirRequirement"
+ },
+ {
+ "class": "InlineJavascriptRequirement"
+ },
+ {
+ "types": [
+ {
+ "type": "record",
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples",
+ "fields": [
+ {
+ "label": "batch name",
+ "doc": "The name for this combination of settings and sample ids.\nWill be used as the midfix for the name of the sample sheet.\nWill be used as the output directory in the bclconvert workflow\n",
+ "type": "string",
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/batch_name"
+ },
+ {
+ "label": "samples",
+ "doc": "The list of Sample_IDs with these BClConvert settings\n",
+ "type": {
+ "type": "array",
+ "items": "string"
+ },
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/samples"
+ },
+ {
+ "label": "settings by override cylces",
+ "doc": "Additional bcl convert settings\n",
+ "type": [
+ "null",
+ {
+ "type": "record",
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings",
+ "fields": [
+ {
+ "label": "adapter behavior",
+ "doc": "Defines whether the software\nmasks or trims Read 1 and/or\nRead 2 adapter sequence(s).\nWhen AdapterRead1 or\nAdapterRead2 is not specified, this\nsetting cannot be specified.\n\u2022 mask\u2014The software masks the\nidentified Read 1 and/or Read 2\nsequence(s) with N.\n\u2022 trim\u2014The software trims the\nidentified Read 1 and/or Read 2\nsequence(s)\n",
+ "type": [
+ "null",
+ {
+ "type": "enum",
+ "symbols": [
+ "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/adapter_behavior/mask",
+ "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/adapter_behavior/trim"
+ ]
+ }
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/adapter_behavior"
+ },
+ {
+ "label": "adapter read 1",
+ "doc": "The sequence of the Read 1\nadapter to be masked or trimmed.\nTo trim multiple adapters, separate\nthe sequences with a plus sign (+)\nto indicate independent adapters\nthat must be independently\nassessed for masking or trimming\nfor each read.\nAllowed characters: A, T, C, G.\n",
+ "type": [
+ "null",
+ "string"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/adapter_read_1"
+ },
+ {
+ "label": "adapter read 2",
+ "doc": "The sequence of the Read 2\nadapter to be masked or trimmed.\nTo trim multiple adapters, separate\nthe sequences with a plus sign (+)\nto indicate independent adapters\nthat must be independently\nassessed for masking or trimming\nfor each read.\nAllowed characters: A, T, C, G.\n",
+ "type": [
+ "null",
+ "string"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/adapter_read_2"
+ },
+ {
+ "label": "adapter stringency",
+ "doc": "he minimum match rate that\ntriggers masking or trimming. This\nvalue is calculated as MatchCount\n/ (MatchCount+MismatchCount).\nAccepted values are 0.5\u20131. The\ndefault value of 0.9 indicates that\nonly reads with \u2265 90% sequence\nidentity with the adapter are\ntrimmed.\n",
+ "type": [
+ "null",
+ "float"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/adapter_stringency"
+ },
+ {
+ "label": "barcode mismatches index 1",
+ "doc": "The number of mismatches\nallowed for index1. Accepted\nvalues are 0, 1, or 2.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/barcode_mismatches_index_1"
+ },
+ {
+ "label": "barcode mismatches index 2",
+ "doc": "The number of mismatches\nallowed for index2. Accepted\nvalues are 0, 1, or 2.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/barcode_mismatches_index_2"
+ },
+ {
+ "label": "create fastq for index reads",
+ "doc": "Specifies whether software will\noutput fastqs for index reads. If\nindex reads are defined as a\nUMI then fastqs for the UMI will\nbe output (if TrimUMI is also set\nto 0). At least 1 index read must\nbe specified in the sample\nsheet.\n\u2022 0\u2014Fastq files will not be output\nfor index reads.\n\u2022 1\u2014Fastq files will be output for\nfastq reads.\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/create_fastq_for_index_reads"
+ },
+ {
+ "label": "mask short reads",
+ "doc": "The minimum read length\ncontaining A, T, C, G values after\nadapter trimming. Reads with\nless than this number of bases\nbecome completely masked. If\nthis value is less than 22, the\ndefault becomes the\nMinimumTrimmedReadLength.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/mask_short_reads"
+ },
+ {
+ "label": "minumum adapter overlap",
+ "doc": "Do not trim any bases unless the\nadapter matches are greater than\nor equal to the user specified\nnumber of bases. At least one\nAdapterRead1 or\nAdapterRead2 must be specified\nto use\nMinimumAdapterOverlap.\nAllowed characters: 1, 2, 3.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/minimum_adapter_overlap"
+ },
+ {
+ "label": "minimum trimmed read length",
+ "doc": "The minimum read length after\nadapter trimming. The software\ntrims adapter sequences from\nreads to the value of this\nparameter. Bases below the\nspecified value are masked with\nN.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/minimum_trimmed_read_length"
+ },
+ {
+ "label": "override cycles",
+ "doc": "Specifies the sequencing and\nindexing cycles that should be\nused when processing the data.\nThe following format must be\nused:\n* Must be same number of\nsemicolon delimited fields in\nstring as sequencing and\nindexing reads specified in\nRunInfo.xml\n* Indexing reads are specified\nwith an I.\n* Sequencing reads are specified\nwith a Y. UMI cycles are\nspecified with an U.\n* Trimmed reads are specified\nwith N.\n* The number of cycles specified\nfor each read must sum to the\nnumber of cycles specified for\nthat read in the RunInfo.xml.\n* Only one Y or I sequence can\nbe specified per read.\nExample: Y151;I8;I8;Y151\n",
+ "type": [
+ "null",
+ "string"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/override_cycles"
+ },
+ {
+ "label": "trim umi",
+ "doc": "Specifies whether UMI cycles\nwill be excluded from fastq files.\nAt least one UMI is required to\nbe specified in the Sample\nSheet when this setting is\nprovided.\n\u2022 0\u2014UMI cycles will be output to\nfastq files.\n\u2022 1\u2014 UMI cycles will not be\noutput to fastq files.\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings/settings/trim_umi"
+ }
+ ]
+ }
+ ],
+ "name": "#settings-by-samples__1.0.0.yaml/settings-by-samples/settings"
+ }
+ ],
+ "id": "#settings-by-samples__1.0.0.yaml"
+ }
+ ],
+ "class": "SchemaDefRequirement"
+ }
+ ],
+ "baseCommand": [
+ "python3",
+ "samplesheet-by-settings.py"
+ ],
+ "inputs": [
+ {
+ "label": "ignore missing samples",
+ "doc": "Don't raise an error if samples from the override cycles list are missing. Just remove them\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "inputBinding": {
+ "prefix": "--ignore-missing-samples"
+ },
+ "id": "#custom-samplesheet-split-by-settings__1.0.0.cwl/ignore_missing_samples"
+ },
+ {
+ "label": "out dir",
+ "doc": "Where to place the output samplesheet csv files\n",
+ "type": [
+ "null",
+ "string"
+ ],
+ "inputBinding": {
+ "prefix": "--out-dir"
+ },
+ "default": "samplesheets-by-override-cycles",
+ "id": "#custom-samplesheet-split-by-settings__1.0.0.cwl/out_dir"
+ },
+ {
+ "label": "samplesheet csv",
+ "doc": "The path to the original samplesheet csv file\n",
+ "type": "File",
+ "inputBinding": {
+ "prefix": "--samplesheet-csv"
+ },
+ "id": "#custom-samplesheet-split-by-settings__1.0.0.cwl/samplesheet_csv"
+ },
+ {
+ "label": "samplesheet format",
+ "type": [
+ "null",
+ {
+ "type": "enum",
+ "symbols": [
+ "#custom-samplesheet-split-by-settings__1.0.0.cwl/samplesheet_format/v1",
+ "#custom-samplesheet-split-by-settings__1.0.0.cwl/samplesheet_format/v2"
+ ]
+ }
+ ],
+ "doc": "Set samplesheet to be in v1 or v2 format\n",
+ "inputBinding": {
+ "prefix": "--samplesheet-format"
+ },
+ "id": "#custom-samplesheet-split-by-settings__1.0.0.cwl/samplesheet_format"
+ },
+ {
+ "label": "settings by samples",
+ "doc": "Takes in an object form of settings by samples. This is used to split samplesheets\n",
+ "type": [
+ "null",
+ {
+ "type": "array",
+ "items": "#settings-by-samples__1.0.0.yaml/settings-by-samples",
+ "inputBinding": {
+ "prefix": "--settings-by-samples=",
+ "separate": false,
+ "valueFrom": "${\n /*\n Format is {\"batch_name\": \"WGS\", \"sample_ids\":[\"S1\", \"S2\", \"S3\"], \"settings\":{\"adapter_read_1\":\"foo\", \"setting_2\":\"bar\"}}\n Although BCLConvert settings are in camel-case, we have settings in lower case, with underscore separation instead\n Settings are translated to camel case in the workflow. adapter_read_1 becomes AdapterRead1\n */\n return JSON.stringify(self);\n}\n"
+ }
+ }
+ ],
+ "inputBinding": {
+ "position": 1
+ },
+ "id": "#custom-samplesheet-split-by-settings__1.0.0.cwl/settings_by_samples"
+ }
+ ],
+ "outputs": [
+ {
+ "label": "samplesheets outdir",
+ "doc": "Directory of samplesheets\n",
+ "type": "Directory",
+ "outputBinding": {
+ "glob": "$(inputs.out_dir)"
+ },
+ "id": "#custom-samplesheet-split-by-settings__1.0.0.cwl/samplesheet_outdir"
+ },
+ {
+ "label": "output samplesheets",
+ "doc": "List of output samplesheets\n",
+ "type": {
+ "type": "array",
+ "items": "File"
+ },
+ "outputBinding": {
+ "glob": "$(inputs.out_dir)/*.csv"
+ },
+ "id": "#custom-samplesheet-split-by-settings__1.0.0.cwl/samplesheets"
+ }
+ ],
+ "successCodes": [
+ 0
+ ],
+ "https://schema.org/author": {
+ "class": "https://schema.org/Person",
+ "https://schema.org/name": "Sehrish Kanwal",
+ "https://schema.org/email": "sehrish.kanwal@umccr.org"
+ }
+ },
+ {
+ "class": "CommandLineTool",
+ "id": "#custom-touch-file__1.0.0.cwl",
+ "label": "custom-create-dummy-file v(1.0.0)",
+ "doc": "Documentation for custom-create-dummy-file v1.0.0\n",
+ "hints": [
+ {
+ "dockerPull": "alpine:latest",
+ "class": "DockerRequirement"
+ },
+ {
+ "coresMin": 1,
+ "ramMin": 1000,
+ "class": "ResourceRequirement",
+ "http://platform.illumina.com/rdf/ica/resources": {
+ "tier": "standard",
+ "type": "standard",
+ "size": "small"
+ }
+ }
+ ],
+ "requirements": [
+ {
+ "class": "InlineJavascriptRequirement"
+ }
+ ],
+ "baseCommand": [
+ "touch"
+ ],
+ "inputs": [
+ {
+ "label": "file name",
+ "doc": "Name of the file to create\n",
+ "type": "string",
+ "default": "dummy_file.txt",
+ "inputBinding": {
+ "position": 1
+ },
+ "id": "#custom-touch-file__1.0.0.cwl/file_name"
+ }
+ ],
+ "outputs": [
+ {
+ "label": "dummy file",
+ "doc": "Output dummy file\n",
+ "type": "File",
+ "outputBinding": {
+ "glob": "$(inputs.file_name)"
+ },
+ "id": "#custom-touch-file__1.0.0.cwl/dummy_file_output"
+ }
+ ],
+ "successCodes": [
+ 0
+ ],
+ "https://schema.org/author": {
+ "class": "https://schema.org/Person",
+ "https://schema.org/name": "Alexis Lucattini",
+ "https://schema.org/email": "Alexis.Lucattini@umccr.org",
+ "https://schema.org/identifier": "https://orcid.org/0000-0001-9754-647X"
+ }
+ },
+ {
+ "class": "CommandLineTool",
+ "id": "#multiqc-interop__1.2.1.cwl",
+ "label": "multiqc-interop v(1.2.1)",
+ "doc": "Producing QC report using interop matrix\n",
+ "hints": [
+ {
+ "dockerPull": "umccr/multiqc_dragen:1.2.1",
+ "class": "DockerRequirement"
+ },
+ {
+ "coresMin": 1,
+ "ramMin": 4000,
+ "class": "ResourceRequirement",
+ "http://platform.illumina.com/rdf/ica/resources": {
+ "type": "standard",
+ "size": "medium"
+ }
+ }
+ ],
+ "requirements": [
+ {
+ "listing": [
+ {
+ "entryname": "generate_interop_files.sh",
+ "entry": "#!/usr/bin/env bash\n\n# Fail on non-zero exit of subshell\nset -euo pipefail\n\n# Generate interop files\ninterop_summary --csv=1 \"$(inputs.input_directory.path)\" > interop_summary.csv\ninterop_index-summary --csv=1 \"$(inputs.input_directory.path)\" > interop_index-summary.csv\n"
+ },
+ {
+ "entryname": "run_multiqc_interop.sh",
+ "entry": "#!/usr/bin/env bash\n\n# Fail on non-zero exit of subshell\nset -euo pipefail\n\n# multiqc interop module needs to run a series of commands \n# ref: https://github.com/umccr-illumina/stratus/blob/806c76609af4755159b12cf5302d4e4e11cc614b/TES/multiqc.json\necho \"Generating interop files\" 1>&2\nbash generate_interop_files.sh\n\n# Now run multiqc\necho \"Running multiqc\" 1>&2\neval multiqc --module interop '\"\\${@}\"' interop_summary.csv interop_index-summary.csv\n"
+ }
+ ],
+ "class": "InitialWorkDirRequirement"
+ },
+ {
+ "class": "InlineJavascriptRequirement"
+ }
+ ],
+ "baseCommand": [
+ "bash",
+ "run_multiqc_interop.sh"
+ ],
+ "inputs": [
+ {
+ "label": "dummy file",
+ "doc": "testing inputs stream logic\nIf used will set input mode to stream on ICA which\nsaves having to download the entire input folder\n",
+ "type": [
+ "null",
+ "File"
+ ],
+ "streamable": true,
+ "id": "#multiqc-interop__1.2.1.cwl/dummy_file"
+ },
+ {
+ "label": "input directory",
+ "doc": "The bcl directory\n",
+ "type": "Directory",
+ "inputBinding": {
+ "position": 100
+ },
+ "id": "#multiqc-interop__1.2.1.cwl/input_directory"
+ },
+ {
+ "label": "output directory",
+ "doc": "The output directory, defaults to \"multiqc-outdir\"\n",
+ "type": [
+ "null",
+ "string"
+ ],
+ "default": "multiqc-outdir",
+ "inputBinding": {
+ "prefix": "--outdir"
+ },
+ "id": "#multiqc-interop__1.2.1.cwl/output_directory_name"
+ },
+ {
+ "label": "output filename",
+ "doc": "Report filename in html format.\nDefaults to 'multiqc-report.html'\n",
+ "type": [
+ "null",
+ "string"
+ ],
+ "default": "multiqc-report.html",
+ "inputBinding": {
+ "prefix": "--filename"
+ },
+ "id": "#multiqc-interop__1.2.1.cwl/output_filename"
+ },
+ {
+ "label": "title",
+ "doc": "Report title.\nPrinted as page header, used for filename if not otherwise specified.\n",
+ "type": "string",
+ "inputBinding": {
+ "prefix": "--title"
+ },
+ "id": "#multiqc-interop__1.2.1.cwl/title"
+ }
+ ],
+ "outputs": [
+ {
+ "label": "multiqc output",
+ "doc": "output dircetory with interop multiQC matrices\n",
+ "type": "Directory",
+ "outputBinding": {
+ "glob": "$(inputs.output_directory_name)"
+ },
+ "id": "#multiqc-interop__1.2.1.cwl/interop_multi_qc_out"
+ }
+ ],
+ "successCodes": [
+ 0
+ ],
+ "https://schema.org/author": {
+ "class": "https://schema.org/Person",
+ "https://schema.org/name": "Sehrish Kanwal",
+ "https://schema.org/email": "sehrish.kanwal@umccr.org"
+ }
+ },
+ {
+ "class": "CommandLineTool",
+ "id": "#multiqc__1.11.0.cwl",
+ "label": "multiqc v(1.11.0)",
+ "doc": "Documentation for multiqc v1.11.0\n",
+ "hints": [
+ {
+ "dockerPull": "quay.io/biocontainers/multiqc:1.11--pyhdfd78af_0",
+ "class": "DockerRequirement"
+ },
+ {
+ "coresMin": 2,
+ "ramMin": 4000,
+ "class": "ResourceRequirement",
+ "http://platform.illumina.com/rdf/ica/resources": {
+ "tier": "standard",
+ "type": "standard",
+ "size": "xlarge"
+ }
+ }
+ ],
+ "requirements": [
+ {
+ "listing": [
+ {
+ "entryname": "run_multiqc.sh",
+ "entry": "#!/usr/bin/env bash\n\n# Set up to fail\nset -euo pipefail\n\n# Create input dir\nmkdir \"$(get_input_dir())\"\n\n# Create an array of dirs\ninput_dir_path_array=( $(inputs.input_directories.map(function(a) {return '\"' + a.path + '\"';}).join(' ')) )\ninput_dir_basename_array=( $(inputs.input_directories.map(function(a) {return '\"' + a.basename + '\"';}).join(' ')) )\n\n# Iterate through input direcotires\nfor input_dir_path in \"\\${input_dir_path_array[@]}\"; do\n ln -s \"\\${input_dir_path}\" \"$(get_input_dir())/\"\ndone\n\n# Run multiqc\neval multiqc '\"\\${@}\"'\n\n# Unlink input directories - otherwise ICA tries to upload them onto gds (and fails)\nfor input_dir_basename in \"\\${input_dir_basename_array[@]}\"; do\n unlink \"$(get_input_dir())/\\${input_dir_basename}\"\ndone\n"
+ }
+ ],
+ "class": "InitialWorkDirRequirement"
+ },
+ {
+ "expressionLib": [
+ "var get_input_dir = function(){ /* Just returns the name of the input directory */ return \"multiqc_input_dir\"; }"
+ ],
+ "class": "InlineJavascriptRequirement"
+ }
+ ],
+ "baseCommand": [
+ "bash",
+ "run_multiqc.sh"
+ ],
+ "arguments": [
+ {
+ "position": 100,
+ "valueFrom": "$(get_input_dir())"
+ }
+ ],
+ "inputs": [
+ {
+ "label": "cl config",
+ "doc": "Override config from the cli\n",
+ "type": [
+ "null",
+ "string"
+ ],
+ "inputBinding": {
+ "prefix": "--cl_config"
+ },
+ "id": "#multiqc__1.11.0.cwl/cl_config"
+ },
+ {
+ "label": "comment",
+ "doc": "Custom comment, will be printed at the top of the report.\n",
+ "type": [
+ "null",
+ "string"
+ ],
+ "inputBinding": {
+ "prefix": "--comment"
+ },
+ "id": "#multiqc__1.11.0.cwl/comment"
+ },
+ {
+ "label": "config",
+ "doc": "Configuration file for bclconvert\n",
+ "type": [
+ "null",
+ "File"
+ ],
+ "streamable": true,
+ "inputBinding": {
+ "prefix": "--config"
+ },
+ "id": "#multiqc__1.11.0.cwl/config"
+ },
+ {
+ "label": "dummy file",
+ "doc": "testing inputs stream logic\nIf used will set input mode to stream on ICA which\nsaves having to download the entire input folder\n",
+ "type": [
+ "null",
+ "File"
+ ],
+ "streamable": true,
+ "id": "#multiqc__1.11.0.cwl/dummy_file"
+ },
+ {
+ "label": "input directories",
+ "doc": "The list of directories to place in the analysis\n",
+ "type": {
+ "type": "array",
+ "items": "Directory"
+ },
+ "id": "#multiqc__1.11.0.cwl/input_directories"
+ },
+ {
+ "label": "output directory",
+ "doc": "The output directory\n",
+ "type": "string",
+ "inputBinding": {
+ "prefix": "--outdir",
+ "valueFrom": "$(runtime.outdir)/$(self)"
+ },
+ "id": "#multiqc__1.11.0.cwl/output_directory_name"
+ },
+ {
+ "label": "output filename",
+ "doc": "Report filename in html format.\nDefaults to 'multiqc-report.html\"\n",
+ "type": "string",
+ "inputBinding": {
+ "prefix": "--filename"
+ },
+ "id": "#multiqc__1.11.0.cwl/output_filename"
+ },
+ {
+ "label": "title",
+ "doc": "Report title.\nPrinted as page header, used for filename if not otherwise specified.\n",
+ "type": "string",
+ "inputBinding": {
+ "prefix": "--title"
+ },
+ "id": "#multiqc__1.11.0.cwl/title"
+ }
+ ],
+ "outputs": [
+ {
+ "label": "output directory",
+ "doc": "Directory that contains all multiqc analysis data\n",
+ "type": "Directory",
+ "outputBinding": {
+ "glob": "$(inputs.output_directory_name)"
+ },
+ "id": "#multiqc__1.11.0.cwl/output_directory"
+ },
+ {
+ "label": "output file",
+ "doc": "Output html file\n",
+ "type": "File",
+ "outputBinding": {
+ "glob": "$(inputs.output_directory_name)/$(inputs.output_filename)"
+ },
+ "id": "#multiqc__1.11.0.cwl/output_file"
+ }
+ ],
+ "successCodes": [
+ 0
+ ],
+ "https://schema.org/author": {
+ "class": "https://schema.org/Person",
+ "https://schema.org/name": "Alexis Lucattini",
+ "https://schema.org/email": "Alexis.Lucattini@umccr.org",
+ "https://schema.org/identifier": "https://orcid.org/0000-0001-9754-647X"
+ }
+ },
+ {
+ "class": "Workflow",
+ "id": "#main",
+ "label": "bcl-conversion v(3.7.5)",
+ "doc": "Runs bcl-convert v3.7.5 with multiqc output of the bcl input directory\n",
+ "requirements": [
+ {
+ "class": "InlineJavascriptRequirement"
+ },
+ {
+ "class": "MultipleInputFeatureRequirement"
+ },
+ {
+ "class": "ScatterFeatureRequirement"
+ },
+ {
+ "types": [
+ {
+ "$import": "#settings-by-samples__1.0.0.yaml"
+ },
+ {
+ "$import": "#fastq-list-row__1.0.0.yaml"
+ }
+ ],
+ "class": "SchemaDefRequirement"
+ },
+ {
+ "class": "StepInputExpressionRequirement"
+ }
+ ],
+ "inputs": [
+ {
+ "label": "bcl input directory",
+ "doc": "Path to the bcl files\n",
+ "type": "Directory",
+ "id": "#bcl_input_directory"
+ },
+ {
+ "label": "bcl only lane",
+ "doc": "Convert only the specified lane number. The value must\nbe less than or equal to the number of lanes specified in the\nRunInfo.xml. Must be a single integer value.\n",
+ "type": [
+ "null",
+ "int"
+ ],
+ "id": "#bcl_only_lane_bcl_conversion"
+ },
+ {
+ "label": "bcl sampleproject subdirectories",
+ "doc": "true \u2014 Allows creation of Sample_Project subdirectories\nas specified in the sample sheet. This option must be set to true for\nthe Sample_Project column in the data section to be used.\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "id": "#bcl_sampleproject_subdirectories_bcl_conversion"
+ },
+ {
+ "label": "delete undetermined indices",
+ "doc": "Delete undetermined indices on completion of the run\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "default": true,
+ "id": "#delete_undetermined_indices_bcl_conversion"
+ },
+ {
+ "label": "first tile only",
+ "doc": "true \u2014 Only process the first tile of the first swath of the\n top surface of each lane specified in the sample sheet.\nfalse \u2014 Process all tiles in each lane, as specified in the sample\n sheet.\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "id": "#first_tile_only_bcl_conversion"
+ },
+ {
+ "label": "ignore missing samples",
+ "doc": "Remove the samples not present in the override cycles record\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "default": true,
+ "id": "#ignore_missing_samples"
+ },
+ {
+ "label": "runfolder name",
+ "doc": "Required - used in naming run specific folder, reports and headings\n",
+ "type": "string",
+ "id": "#runfolder_name"
+ },
+ {
+ "label": "sample sheet",
+ "doc": "The path to the full samplesheet\n",
+ "type": "File",
+ "id": "#samplesheet"
+ },
+ {
+ "label": "samplesheet outdir",
+ "doc": "Output directory of the samplesheets split by settings\n",
+ "type": [
+ "null",
+ "string"
+ ],
+ "id": "#samplesheet_outdir"
+ },
+ {
+ "label": "set samplesheet output format",
+ "doc": "Convert headers to v2 samplesheet format\n",
+ "type": [
+ "null",
+ {
+ "type": "enum",
+ "symbols": [
+ "#/samplesheet_output_format/v1",
+ "#/samplesheet_output_format/v2"
+ ]
+ }
+ ],
+ "id": "#samplesheet_output_format"
+ },
+ {
+ "label": "settings by samples",
+ "doc": "List of settings by samples\n",
+ "type": [
+ "null",
+ {
+ "type": "array",
+ "items": "#settings-by-samples__1.0.0.yaml/settings-by-samples"
+ }
+ ],
+ "id": "#settings_by_samples"
+ },
+ {
+ "label": "strict mode bcl conversion",
+ "doc": "true \u2014 Abort the program if any filter, locs, bcl, or bci lane\nfiles are missing or corrupt.\nfalse \u2014 Continue processing if any filter, locs, bcl, or bci lane files\nare missing. Return a warning message for each missing or corrupt\nfile.\n",
+ "type": [
+ "null",
+ "boolean"
+ ],
+ "id": "#strict_mode_bcl_conversion"
+ }
+ ],
+ "steps": [
+ {
+ "label": "bcl convert",
+ "doc": "BCLConvert is then scattered across each of the samplesheets.\n",
+ "scatter": [
+ "#bcl_convert_step/samplesheet",
+ "#bcl_convert_step/output_directory"
+ ],
+ "scatterMethod": "dotproduct",
+ "in": [
+ {
+ "source": "#bcl_input_directory",
+ "id": "#bcl_convert_step/bcl_input_directory"
+ },
+ {
+ "source": "#bcl_only_lane_bcl_conversion",
+ "id": "#bcl_convert_step/bcl_only_lane"
+ },
+ {
+ "source": "#bcl_sampleproject_subdirectories_bcl_conversion",
+ "id": "#bcl_convert_step/bcl_sampleproject_subdirectories"
+ },
+ {
+ "source": "#delete_undetermined_indices_bcl_conversion",
+ "id": "#bcl_convert_step/delete_undetermined_indices"
+ },
+ {
+ "source": "#first_tile_only_bcl_conversion",
+ "id": "#bcl_convert_step/first_tile_only"
+ },
+ {
+ "source": "#get_batch_dirs/batch_names",
+ "id": "#bcl_convert_step/output_directory"
+ },
+ {
+ "source": "#samplesheet_split_by_settings_step/samplesheets",
+ "id": "#bcl_convert_step/samplesheet"
+ },
+ {
+ "source": "#strict_mode_bcl_conversion",
+ "id": "#bcl_convert_step/strict_mode"
+ }
+ ],
+ "out": [
+ {
+ "id": "#bcl_convert_step/bcl_convert_directory_output"
+ },
+ {
+ "id": "#bcl_convert_step/fastq_list_rows"
+ }
+ ],
+ "run": "#bclConvert__3.7.5.cwl",
+ "id": "#bcl_convert_step"
+ },
+ {
+ "label": "bclconvert qc step",
+ "doc": "The bclconvert qc step - from scatter this takes in an array of dirs\n",
+ "in": [
+ {
+ "valueFrom": "${\n return JSON.stringify({\"bclconvert\": { \"genome_size\": \"hg38_genome\" }});\n }\n",
+ "id": "#bclconvert_qc_step/cl_config"
+ },
+ {
+ "source": "#create_dummy_file_step/dummy_file_output",
+ "id": "#bclconvert_qc_step/dummy_file"
+ },
+ {
+ "source": "#bcl_convert_step/bcl_convert_directory_output",
+ "id": "#bclconvert_qc_step/input_directories"
+ },
+ {
+ "source": "#runfolder_name",
+ "valueFrom": "$(self)_bclconvert_multiqc",
+ "id": "#bclconvert_qc_step/output_directory_name"
+ },
+ {
+ "source": "#runfolder_name",
+ "valueFrom": "$(self)_bclconvert_multiqc.html",
+ "id": "#bclconvert_qc_step/output_filename"
+ },
+ {
+ "source": "#runfolder_name",
+ "valueFrom": "UMCCR MultiQC BCLConvert report for $(self)",
+ "id": "#bclconvert_qc_step/title"
+ }
+ ],
+ "out": [
+ {
+ "id": "#bclconvert_qc_step/output_directory"
+ }
+ ],
+ "run": "#multiqc__1.11.0.cwl",
+ "id": "#bclconvert_qc_step"
+ },
+ {
+ "label": "create dummy file",
+ "doc": "Intermediate step for letting multiqc-interop be placed in stream mode\n",
+ "in": [],
+ "out": [
+ {
+ "id": "#create_dummy_file_step/dummy_file_output"
+ }
+ ],
+ "run": "#custom-touch-file__1.0.0.cwl",
+ "id": "#create_dummy_file_step"
+ },
+ {
+ "label": "flatten fastq list rows array",
+ "doc": "fastq list rows is an array and bcl convert is from a directory output.\nThis scatters the arrays to a single array\n",
+ "in": [
+ {
+ "source": "#bcl_convert_step/fastq_list_rows",
+ "id": "#flatten_fastq_list_rows_array/arrayTwoDim"
+ }
+ ],
+ "out": [
+ {
+ "id": "#flatten_fastq_list_rows_array/array1d"
+ }
+ ],
+ "run": "#flatten-array-fastq-list__1.0.0.cwl",
+ "id": "#flatten_fastq_list_rows_array"
+ },
+ {
+ "label": "get batch directories",
+ "doc": "Get the directory names of each of the directories we wish to scatter over\n",
+ "in": [
+ {
+ "source": "#samplesheet_split_by_settings_step/samplesheets",
+ "id": "#get_batch_dirs/samplesheets"
+ }
+ ],
+ "out": [
+ {
+ "id": "#get_batch_dirs/batch_names"
+ }
+ ],
+ "run": "#get-samplesheet-midfix-regex__1.0.0.cwl",
+ "id": "#get_batch_dirs"
+ },
+ {
+ "label": "interop qc step",
+ "doc": "Run the multiqc by first also generating the interop files for use\n",
+ "in": [
+ {
+ "source": "#create_dummy_file_step/dummy_file_output",
+ "id": "#interop_qc_step/dummy_file"
+ },
+ {
+ "source": "#bcl_input_directory",
+ "id": "#interop_qc_step/input_directory"
+ },
+ {
+ "source": "#runfolder_name",
+ "valueFrom": "$(self)_interop_multiqc",
+ "id": "#interop_qc_step/output_directory_name"
+ },
+ {
+ "source": "#runfolder_name",
+ "valueFrom": "$(self)_interop_multiqc.html",
+ "id": "#interop_qc_step/output_filename"
+ },
+ {
+ "source": "#runfolder_name",
+ "valueFrom": "UMCCR MultiQC Interop report for $(self)",
+ "id": "#interop_qc_step/title"
+ }
+ ],
+ "out": [
+ {
+ "id": "#interop_qc_step/interop_multi_qc_out"
+ }
+ ],
+ "run": "#multiqc-interop__1.2.1.cwl",
+ "id": "#interop_qc_step"
+ },
+ {
+ "label": "Split samplesheet by settings step",
+ "doc": "Samplesheet is split by the different input types.\nThese are generally a difference in override cycles parameters or adapter trimming settings\nThis then scatters multiple bclconvert workflows split by sample id\n",
+ "in": [
+ {
+ "source": "#ignore_missing_samples",
+ "id": "#samplesheet_split_by_settings_step/ignore_missing_samples"
+ },
+ {
+ "source": "#samplesheet_outdir",
+ "id": "#samplesheet_split_by_settings_step/out_dir"
+ },
+ {
+ "source": "#samplesheet",
+ "id": "#samplesheet_split_by_settings_step/samplesheet_csv"
+ },
+ {
+ "source": "#samplesheet_output_format",
+ "id": "#samplesheet_split_by_settings_step/samplesheet_format"
+ },
+ {
+ "source": "#settings_by_samples",
+ "id": "#samplesheet_split_by_settings_step/settings_by_samples"
+ }
+ ],
+ "out": [
+ {
+ "id": "#samplesheet_split_by_settings_step/samplesheets"
+ },
+ {
+ "id": "#samplesheet_split_by_settings_step/samplesheet_outdir"
+ }
+ ],
+ "run": "#custom-samplesheet-split-by-settings__1.0.0.cwl",
+ "id": "#samplesheet_split_by_settings_step"
+ }
+ ],
+ "outputs": [
+ {
+ "label": "bclconvert multiqc",
+ "doc": "multiqc directory output that contains bclconvert multiqc data\n",
+ "type": "Directory",
+ "outputSource": "#bclconvert_qc_step/output_directory",
+ "id": "#bclconvert_multiqc_out"
+ },
+ {
+ "label": "Output fastq directores",
+ "doc": "The outputs from the bclconvert-step\n",
+ "type": {
+ "type": "array",
+ "items": "Directory"
+ },
+ "outputSource": "#bcl_convert_step/bcl_convert_directory_output",
+ "id": "#fastq_directories"
+ },
+ {
+ "label": "rows of fastq list csv file",
+ "doc": "Contains the fastq list row schema for each of the output fastq files\n",
+ "type": {
+ "type": "array",
+ "items": "#fastq-list-row__1.0.0.yaml/fastq-list-row"
+ },
+ "outputSource": "#flatten_fastq_list_rows_array/array1d",
+ "id": "#fastq_list_rows"
+ },
+ {
+ "label": "interop multiqc",
+ "doc": "multiqc directory output that contains interop data\n",
+ "type": "Directory",
+ "outputSource": "#interop_qc_step/interop_multi_qc_out",
+ "id": "#interop_multiqc_out"
+ },
+ {
+ "label": "split samplesheets",
+ "doc": "List of samplesheets split by override cycles\n",
+ "type": {
+ "type": "array",
+ "items": "File"
+ },
+ "outputSource": "#samplesheet_split_by_settings_step/samplesheets",
+ "id": "#split_sheets"
+ },
+ {
+ "label": "split sheets dir",
+ "doc": "The directory containing the samplesheets used for each bcl convert\n",
+ "type": "Directory",
+ "outputSource": "#samplesheet_split_by_settings_step/samplesheet_outdir",
+ "id": "#split_sheets_dir"
+ }
+ ],
+ "https://schema.org/author": {
+ "class": "https://schema.org/Person",
+ "https://schema.org/name": "Sehrish Kanwal",
+ "https://schema.org/email": "sehrish.kanwal@umccr.org"
+ }
+ }
+ ],
+ "cwlVersion": "v1.1",
+ "$schemas": [
+ "https://schema.org/version/latest/schemaorg-current-http.rdf"
+ ]
+}
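
The packed CWL document above keeps every tool and workflow in a single `$graph` array, and steps point at sibling processes through `#`-fragment ids (e.g. `"run": "#multiqc__1.11.0.cwl"`). A minimal sketch of walking such a file with only the standard library; the file name here is a placeholder, not part of this patch:

```python
import json

# Hypothetical path to a packed CWL document like the one above.
with open("bcl-conversion-packed.cwl.json") as handle:
    packed = json.load(handle)

# Index every process in $graph by its fragment id.
graph = {process["id"]: process for process in packed["$graph"]}

# Each workflow step's "run" is a "#..." reference into the same graph.
for step in graph["#main"]["steps"]:
    target = graph[step["run"]]
    print(step["id"], "->", target["class"], target.get("label", ""))
```

This self-containment is the point of `cwltool --pack`: one JSON document, resolvable without touching the filesystem or network.
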
diff --git a/tests/wf/trick_defaults2.cwl b/tests/wf/trick_defaults2.cwl
new file mode 100644
index 0000000000..9b53fba927
--- /dev/null
+++ b/tests/wf/trick_defaults2.cwl
@@ -0,0 +1,14 @@
+#!/usr/bin/env cwl-runner
+class: CommandLineTool
+cwlVersion: v1.0
+inputs:
+ inp1:
+ type: File
+ default:
+ class: File
+ location: hello.txt
+ secondaryFiles:
+ - class: Directory
+ location: indir1
+outputs: []
+baseCommand: true
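
The new test gives `inp1` a File default whose `secondaryFiles` carry a Directory literal, so a runner must stage both before executing `true`. A minimal sketch of driving it through cwltool's Python entry point instead of the CLI (paths assume the repository root as the working directory):

```python
# cwltool.main.main accepts an argv-style list and returns an exit code.
from cwltool.main import main

# The tool's only input has a default, so no job order file is needed.
exit_code = main(["tests/wf/trick_defaults2.cwl"])

# baseCommand is `true`, so a zero exit code means hello.txt and the
# indir1 Directory secondaryFile were both staged without error.
assert exit_code == 0
```
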
diff --git a/tests/with_doc.cwl b/tests/with_doc.cwl
index a68c8e076b..77c6c0f4b7 100644
--- a/tests/with_doc.cwl
+++ b/tests/with_doc.cwl
@@ -1,3 +1,4 @@
+#!/usr/bin/env cwl-runner
cwlVersion: v1.0
class: CommandLineTool
inputs: []
diff --git a/tests/without_doc.cwl b/tests/without_doc.cwl
index ef1688a8d3..85765e7d82 100644
--- a/tests/without_doc.cwl
+++ b/tests/without_doc.cwl
@@ -1,3 +1,4 @@
+#!/usr/bin/env cwl-runner
cwlVersion: v1.0
class: CommandLineTool
inputs: []
diff --git a/tox.ini b/tox.ini
index 4d4b4a5cd4..9bf36624ba 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,17 +1,20 @@
[tox]
envlist =
- py{36,37,38,39}-lint,
- py{36,37,38,39}-unit,
- py{36,37,38,39}-bandit,
- py{36,37,38}-mypy,
- py38-lint-readme,
- py38-shellcheck,
- py38-pydocstyle
+ py3{6,7,8,9}-lint
+ py3{6,7,8,9}-unit
+ py3{6,7,8,9}-bandit
+ py3{6,7,8,9}-mypy
+ py39-lintreadme
+ py39-shellcheck
+ py39-pydocstyle
-skipsdist = True
skip_missing_interpreters = True
-[travis]
+[pytest]
+addopts=--ignore cwltool/schemas --basetemp ./tmp -n auto
+testpaths = tests
+
+[gh-actions]
python =
3.6: py36
3.7: py37
@@ -19,69 +22,58 @@ python =
3.9: py39
[testenv]
+skipsdist =
+ py3{6,7,8,9}-!{unit,mypy,lintreadme} = True
+
description =
- py{36,37,38,39}-unit: Run the unit tests
- py{36,37,38,39}-lint: Lint the Python code
- py{36,37,38,39}-bandit: Search for common security issues
- py{36,37,38}-mypy: Check for type safety
- py38-pydocstyle: docstring style checker
- py38-shellcheck: syntax check for shell scripts
+ py3{6,7,8,9}-unit: Run the unit tests
+ py3{6,7,8,9}-lint: Lint the Python code
+ py3{6,7,8,9}-bandit: Search for common security issues
+ py3{6,7,8,9}-mypy: Check for type safety
+ py39-pydocstyle: docstring style checker
+ py39-shellcheck: syntax check for shell scripts
+ py39-lintreadme: Lint the README.rst→.md conversion
passenv =
CI
- TRAVIS
- TRAVIS_*
+ GITHUB_*
PROOT_NO_SECCOMP
+
+extras =
+ py3{6,7,8,9}-unit: deps
+
deps =
- py{36,37,38,39}-{unit,lint,bandit,mypy}: -rrequirements.txt
- py{36,37,38,39}-unit: codecov
- py{36,37,38,39}-unit: pytest-xdist
- py{36,37,38,39}-unit: pytest-cov
- py{36,37,38,39}-{unit,mypy}: -rtest-requirements.txt
- py{36,37,38,39}-unit: galaxy-tool-util
- py{36,37,38,39}-lint: flake8-bugbear
- py{36,37,38,39}-lint: black
- py{36,37,38,39}-bandit: bandit
- py{36,37,38}-mypy: mypy==0.790
+ py3{6,7,8,9}-{unit,lint,bandit,mypy}: -rrequirements.txt
+ py3{6,7,8,9}-{unit,mypy}: -rtest-requirements.txt
+ py3{6,7,8,9}-lint: flake8-bugbear
+ py3{6,7,8,9}-lint: black
+ py3{6,7,8,9}-bandit: bandit
+ py3{6,7,8,9}-bandit: importlib_metadata != 4.8.0
+ py3{6,7,8,9}-mypy: -rmypy_requirements.txt
+ py39-pydocstyle: pydocstyle
+ py39-pydocstyle: diff-cover
+ py39-lintreadme: twine
+ py39-lintreadme: wheel
+ py39-lintreadme: readme_renderer[md]
setenv =
- py{36,37,38,39}-unit: LC_ALL = C
+ py3{6,7,8,9}-unit: LC_ALL = C.UTF-8
-commands =
- py{36,37,38,39}-unit: python3 -m pip install -U pip setuptools wheel
- py{36,37,38,39}-unit: python3 -m pip install -e .[deps]
- py{36,37,38,39}-unit: python3 -m pip install -rtest-requirements.txt
- py{36,37,38,39}-unit: coverage run --parallel-mode -m pytest --strict {posargs}
- py{36,37,38,39}-unit: coverage combine
- py{36,37,38,39}-unit: coverage report
- py{36,37,38,39}-unit: coverage xml
- py{36,37,38,39}-unit: codecov --file coverage.xml
- py{36,37,38,39}-bandit: bandit -r cwltool
- py{36,37,38,39}-lint: flake8 cwltool setup.py
- py{36,37,38}-mypy: make mypy
- py{36,37,38}-mypy: make mypyc
- py38-shellcheck: make shellcheck
+commands_pre =
+ py3{6,7,8,9}-unit: python -m pip install -U pip setuptools wheel
+ py39-lintreadme: python setup.py sdist --dist-dir {distdir}
+ py39-lintreadme: python setup.py bdist_wheel --dist-dir {distdir}
-whitelist_externals =
- py{36,37,38,39}-lint: flake8
- py{36,37,38,39}-lint: black
- py{36,37,38}-{mypy,shellcheck}: make
- py38-shellcheck: shellcheck
+commands =
+ py3{6,7,8,9}-unit: make coverage-report coverage.xml PYTEST_EXTRA={posargs}
+ py3{6,7,8,9}-bandit: bandit -r cwltool
+ py3{6,7,8,9}-lint: make flake8 format-check
+ py3{6,7,8,9}-mypy: make mypy mypyc PYTEST_EXTRA={posargs}
+ py39-shellcheck: make shellcheck
+ py39-pydocstyle: make diff_pydocstyle_report
+ py39-lintreadme: twine check {distdir}/*
-[testenv:py38-pydocstyle]
-whitelist_externals = make
-commands = make diff_pydocstyle_report
-deps =
- pydocstyle
- diff-cover
+skip_install =
+ py3{6,7,8,9}-{bandit,lint,mypy,shellcheck,pydocstyle,lintreadme}: true
-[testenv:py38-lint-readme]
-description = Lint the README.rst->.md conversion
-commands =
- python setup.py sdist
- python setup.py bdist_wheel
- twine check dist/*
-deps =
- twine
- wheel
- readme_renderer[md]
+allowlist_externals = make
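
The rewritten tox.ini leans on tox's generative syntax: `py3{6,7,8,9}-unit` expands to four environments, and factor-conditional lines such as `py3{6,7,8,9}-lint: black` apply only to environments carrying both factors. A rough sketch of the brace expansion, illustrative only and not tox's actual implementation:

```python
import itertools
import re
from typing import List

def expand(pattern: str) -> List[str]:
    """Expand a tox generative name like 'py3{6,7,8,9}-lint'."""
    parts = re.split(r"\{([^}]*)\}", pattern)
    # Odd indices hold the comma-separated alternatives from each {...}.
    choices = [part.split(",") if i % 2 else [part]
               for i, part in enumerate(parts)]
    return ["".join(combo) for combo in itertools.product(*choices)]

print(expand("py3{6,7,8,9}-lint"))
# ['py36-lint', 'py37-lint', 'py38-lint', 'py39-lint']
```
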
diff --git a/travis.bash b/travis.bash
deleted file mode 100755
index 1d4bb628cb..0000000000
--- a/travis.bash
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/bin/bash
-venv() {
- if ! test -d "$1" ; then
- virtualenv -p python3 "$1"
- fi
- # shellcheck source=/dev/null
- source "$1"/bin/activate
-}
-version=${version:-v1.0}
-if [[ "$version" = "v1.0" ]] ; then
- wget https://github.com/common-workflow-language/common-workflow-language/archive/main.tar.gz
- tar xzf main.tar.gz && rm main.tar.gz
-else
- # shellcheck disable=SC2001
- repo=$(echo "$version" | sed 's/\(v[0-9]*\.\)\([0-9]*\).*/\1\2/')
- wget https://github.com/common-workflow-language/cwl-"${repo}"/archive/main.tar.gz
- tar xzf main.tar.gz && rm main.tar.gz
-fi
-
-docker pull node:slim
-
-# shellcheck disable=SC2043
-for CONTAINER in docker
-# for CONTAINER in docker singularity
-# singularity having issues on ci.commonwl.org; tests pass with https://gist.github.com/mr-c/0ec90d717617d074017c0cb38b72d1a4
-do
- venv cwltool-venv3
- pip3 install -U setuptools wheel pip
- pip3 uninstall -y cwltool
- pip3 install -e .
- pip3 install "cwltest>=1.0.20180518074130" codecov
- if [[ "$version" = "v1.0" ]]
- then
- DRAFT="DRAFT=v1.0"
- pushd common-workflow-language-main || exit 1
- else
- pushd cwl-"${repo}"-main || exit 1
- fi
- rm -f .coverage* coverage.xml
- source=$(realpath ../cwltool)
- COVERAGE_RC=${PWD}/.coveragerc
- cat > "${COVERAGE_RC}" < "${CWLTOOL_WITH_COV}" < None: ...
- def __getattr__(self, name: str) -> _FuncPointer: ...
- def __getitem__(self, name: str) -> _FuncPointer: ...
-if sys.platform == 'win32':
- class OleDLL(CDLL): ...
- class WinDLL(CDLL): ...
-class PyDLL(CDLL): ...
-
-class LibraryLoader(Generic[_DLLT]):
- def __init__(self, dlltype: Type[_DLLT]) -> None: ...
- def __getattr__(self, name: str) -> _DLLT: ...
- def __getitem__(self, name: str) -> _DLLT: ...
- def LoadLibrary(self, name: str) -> _DLLT: ...
-
-cdll: LibraryLoader[CDLL] = ...
-if sys.platform == 'win32':
- windll: LibraryLoader[WinDLL] = ...
- oledll: LibraryLoader[OleDLL] = ...
-pydll: LibraryLoader[PyDLL] = ...
-pythonapi: PyDLL = ...
-
-class _CDataMeta(type):
- # By default mypy complains about the following two methods, because strictly speaking cls
- # might not be a Type[_CT]. However this can never actually happen, because the only class that
- # uses _CDataMeta as its metaclass is _CData. So it's safe to ignore the errors here.
- def __mul__(cls: Type[_CT], other: int) -> Type[Array[_CT]]: ... # type: ignore
- def __rmul__(cls: Type[_CT], other: int) -> Type[Array[_CT]]: ... # type: ignore
-class _CData(metaclass=_CDataMeta):
- _b_base: int = ...
- _b_needsfree_: bool = ...
- _objects: Optional[Mapping[Any, int]] = ...
- @classmethod
- def from_buffer(cls: Type[_CT], source: bytearray, offset: int = ...) -> _CT: ...
- @classmethod
- def from_buffer_copy(cls: Type[_CT], source: bytearray, offset: int = ...) -> _CT: ...
- @classmethod
- def from_address(cls: Type[_CT], address: int) -> _CT: ...
- @classmethod
- def from_param(cls: Type[_CT], obj: Any) -> _UnionT[_CT, _CArgObject]: ...
- @classmethod
- def in_dll(cls: Type[_CT], library: CDLL, name: str) -> _CT: ...
-
-class _PointerLike(_CData): ...
-
-_ECT = Callable[[Optional[Type[_CData]],
- _FuncPointer,
- Tuple[_CData, ...]],
- _CData]
-_PF = _UnionT[
- Tuple[int],
- Tuple[int, str],
- Tuple[int, str, Any]
-]
-class _FuncPointer(_PointerLike, _CData):
- restype: _UnionT[Type[_CData], Callable[[int], None], None] = ...
- argtypes: Sequence[Type[_CData]] = ...
- errcheck: _ECT = ...
- @overload
- def __init__(self, address: int) -> None: ...
- @overload
- def __init__(self, callable: Callable[..., Any]) -> None: ...
- @overload
- def __init__(self, func_spec: Tuple[_UnionT[str, int], CDLL],
- paramflags: Tuple[_PF, ...] = ...) -> None: ...
- @overload
- def __init__(self, vtlb_index: int, name: str,
- paramflags: Tuple[_PF, ...] = ...,
- iid: pointer[c_int] = ...) -> None: ...
- def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
-
-class ArgumentError(Exception): ...
-
-
-def CFUNCTYPE(restype: Optional[Type[_CData]],
- *argtypes: Type[_CData],
- use_errno: bool = ...,
- use_last_error: bool = ...) -> Type[_FuncPointer]: ...
-if sys.platform == 'win32':
- def WINFUNCTYPE(restype: Optional[Type[_CData]],
- *argtypes: Type[_CData],
- use_errno: bool = ...,
- use_last_error: bool = ...) -> Type[_FuncPointer]: ...
-def PYFUNCTYPE(restype: Optional[Type[_CData]],
- *argtypes: Type[_CData]) -> Type[_FuncPointer]: ...
-
-class _CArgObject: ...
-
-# Any type that can be implicitly converted to c_void_p when passed as a C function argument.
-# (bytes is not included here, see below.)
-_CVoidPLike = _UnionT[_PointerLike, Array[Any], _CArgObject, int]
-# Same as above, but including types known to be read-only (i. e. bytes).
-# This distinction is not strictly necessary (ctypes doesn't differentiate between const
-# and non-const pointers), but it catches errors like memmove(b'foo', buf, 4)
-# when memmove(buf, b'foo', 4) was intended.
-_CVoidConstPLike = _UnionT[_CVoidPLike, bytes]
-
-def addressof(obj: _CData) -> int: ...
-def alignment(obj_or_type: _UnionT[_CData, Type[_CData]]) -> int: ...
-def byref(obj: _CData, offset: int = ...) -> _CArgObject: ...
-_PT = TypeVar('_PT', bound=_PointerLike)
-def cast(obj: _UnionT[_CData, _CArgObject], type: Type[_PT]) -> _PT: ...
-def create_string_buffer(init_or_size: _UnionT[int, bytes],
- size: Optional[int] = ...) -> Array[c_char]: ...
-c_buffer = create_string_buffer
-def create_unicode_buffer(init_or_size: _UnionT[int, Text],
- size: Optional[int] = ...) -> Array[c_wchar]: ...
-if sys.platform == 'win32':
- def DllCanUnloadNow() -> int: ...
- def DllGetClassObject(rclsid: Any, riid: Any, ppv: Any) -> int: ... # TODO not documented
- def FormatError(code: int) -> str: ...
- def GetLastError() -> int: ...
-def get_errno() -> int: ...
-if sys.platform == 'win32':
- def get_last_error() -> int: ...
-def memmove(dst: _CVoidPLike, src: _CVoidConstPLike, count: int) -> None: ...
-def memset(dst: _CVoidPLike, c: int, count: int) -> None: ...
-def POINTER(type: Type[_CT]) -> Type[pointer[_CT]]: ...
-
-# The real ctypes.pointer is a function, not a class. The stub version of pointer behaves like
-# ctypes._Pointer in that it is the base class for all pointer types. Unlike the real _Pointer,
-# it can be instantiated directly (to mimic the behavior of the real pointer function).
-class pointer(Generic[_CT], _PointerLike, _CData):
- _type_: ClassVar[Type[_CT]] = ...
- contents: _CT = ...
- def __init__(self, arg: _CT = ...) -> None: ...
- @overload
- def __getitem__(self, i: int) -> _CT: ...
- @overload
- def __getitem__(self, s: slice) -> List[_CT]: ...
- @overload
- def __setitem__(self, i: int, o: _CT) -> None: ...
- @overload
- def __setitem__(self, s: slice, o: Iterable[_CT]) -> None: ...
-
-def resize(obj: _CData, size: int) -> None: ...
-if sys.version_info < (3,):
- def set_conversion_mode(encoding: str, errors: str) -> Tuple[str, str]: ...
-def set_errno(value: int) -> int: ...
-if sys.platform == 'win32':
- def set_last_error(value: int) -> int: ...
-def sizeof(obj_or_type: _UnionT[_CData, Type[_CData]]) -> int: ...
-def string_at(address: _CVoidConstPLike, size: int = ...) -> bytes: ...
-if sys.platform == 'win32':
- def WinError(code: Optional[int] = ...,
- desc: Optional[str] = ...) -> WindowsError: ...
-def wstring_at(address: _CVoidConstPLike, size: int = ...) -> str: ...
-
-class _SimpleCData(Generic[_T], _CData):
- value: _T = ...
- def __init__(self, value: _T = ...) -> None: ...
-
-class c_byte(_SimpleCData[int]): ...
-
-class c_char(_SimpleCData[bytes]):
- def __init__(self, value: _UnionT[int, bytes] = ...) -> None: ...
-class c_char_p(_PointerLike, _SimpleCData[Optional[bytes]]):
- def __init__(self, value: Optional[_UnionT[int, bytes]] = ...) -> None: ...
-
-class c_double(_SimpleCData[float]): ...
-class c_longdouble(_SimpleCData[float]): ...
-class c_float(_SimpleCData[float]): ...
-
-class c_int(_SimpleCData[int]): ...
-class c_int8(_SimpleCData[int]): ...
-class c_int16(_SimpleCData[int]): ...
-class c_int32(_SimpleCData[int]): ...
-class c_int64(_SimpleCData[int]): ...
-
-class c_long(_SimpleCData[int]): ...
-class c_longlong(_SimpleCData[int]): ...
-
-class c_short(_SimpleCData[int]): ...
-
-class c_size_t(_SimpleCData[int]): ...
-class c_ssize_t(_SimpleCData[int]): ...
-
-class c_ubyte(_SimpleCData[int]): ...
-
-class c_uint(_SimpleCData[int]): ...
-class c_uint8(_SimpleCData[int]): ...
-class c_uint16(_SimpleCData[int]): ...
-class c_uint32(_SimpleCData[int]): ...
-class c_uint64(_SimpleCData[int]): ...
-
-class c_ulong(_SimpleCData[int]): ...
-class c_ulonglong(_SimpleCData[int]): ...
-
-class c_ushort(_SimpleCData[int]): ...
-
-class c_void_p(_PointerLike, _SimpleCData[Optional[int]]): ...
-
-class c_wchar(_SimpleCData[Text]): ...
-class c_wchar_p(_PointerLike, _SimpleCData[Optional[Text]]):
- def __init__(self, value: Optional[_UnionT[int, Text]] = ...) -> None: ...
-
-class c_bool(_SimpleCData[bool]):
- def __init__(self, value: bool) -> None: ...
-
-if sys.platform == 'win32':
- class HRESULT(_SimpleCData[int]): ... # TODO undocumented
-
-class py_object(_SimpleCData[_T]): ...
-
-class _CField:
- offset: int = ...
- size: int = ...
-class _StructUnionMeta(_CDataMeta):
- _fields_: Sequence[_UnionT[Tuple[str, Type[_CData]], Tuple[str, Type[_CData], int]]] = ...
- _pack_: int = ...
- _anonymous_: Sequence[str] = ...
- def __getattr__(self, name: str) -> _CField: ...
-class _StructUnionBase(_CData, metaclass=_StructUnionMeta):
- def __init__(self, *args: Any, **kw: Any) -> None: ...
- def __getattr__(self, name: str) -> Any: ...
- def __setattr__(self, name: str, value: Any) -> None: ...
-
-class Union(_StructUnionBase): ...
-class Structure(_StructUnionBase): ...
-class BigEndianStructure(Structure): ...
-class LittleEndianStructure(Structure): ...
-
-class Array(Generic[_T], _CData):
- _length_: ClassVar[int] = ...
- _type_: ClassVar[Type[_T]] = ...
- raw: bytes = ... # TODO only available with _T == c_char
- value: bytes = ... # TODO only available with _T == c_char
- # TODO These methods cannot be annotated correctly at the moment.
- # All of these "Any"s stand for the array's element type, but it's not possible to use _T here,
- # because of a special feature of ctypes.
- # By default, when accessing an element of an Array[_T], the returned object has type _T.
- # However, when _T is a "simple type" like c_int, ctypes automatically "unboxes" the object
- # and converts it to the corresponding Python primitive. For example, when accessing an element
- # of an Array[c_int], a Python int object is returned, not a c_int.
- # This behavior does *not* apply to subclasses of "simple types".
- # If MyInt is a subclass of c_int, then accessing an element of an Array[MyInt] returns
- # a MyInt, not an int.
- # This special behavior is not easy to model in a stub, so for now all places where
- # the array element type would belong are annotated with Any instead.
- def __init__(self, *args: Any) -> None: ...
- @overload
- def __getitem__(self, i: int) -> Any: ...
- @overload
- def __getitem__(self, s: slice) -> List[Any]: ...
- @overload
- def __setitem__(self, i: int, o: Any) -> None: ...
- @overload
- def __setitem__(self, s: slice, o: Iterable[Any]) -> None: ...
- def __iter__(self) -> Iterator[Any]: ...
- # Can't inherit from Sized because the metaclass conflict between
- # Sized and _CData prevents using _CDataMeta.
- def __len__(self) -> int: ...
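
The deletions from here on drop cwltool's vendored typeshed stubs; modern mypy bundles typeshed itself, so the local copies only risk drift. For reference, a small sketch of the ctypes surface the stub above describes (the `argtypes`/`restype` attributes on a loaded function), assuming a POSIX system where libc resolves:

```python
import ctypes
from ctypes import c_char_p, c_size_t
from ctypes.util import find_library

# On glibc systems find_library("c") typically returns "libc.so.6".
libc = ctypes.CDLL(find_library("c"))

# Declaring these is exactly what the stub's _FuncPointer.argtypes
# and .restype attributes make type-checkable.
libc.strlen.argtypes = [c_char_p]
libc.strlen.restype = c_size_t

assert libc.strlen(b"cwltool") == 7
```
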
diff --git a/typeshed/2and3/ctypes/util.pyi b/typeshed/2and3/ctypes/util.pyi
deleted file mode 100644
index 7077d9d2f1..0000000000
--- a/typeshed/2and3/ctypes/util.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for ctypes.util
-
-from typing import Optional
-import sys
-
-def find_library(name: str) -> Optional[str]: ...
-if sys.platform == 'win32':
- def find_msvcrt() -> Optional[str]: ...
diff --git a/typeshed/2and3/ctypes/wintypes.pyi b/typeshed/2and3/ctypes/wintypes.pyi
deleted file mode 100644
index c5a6226b25..0000000000
--- a/typeshed/2and3/ctypes/wintypes.pyi
+++ /dev/null
@@ -1,209 +0,0 @@
-from ctypes import (
- _SimpleCData, Array, Structure, c_byte, c_char, c_char_p, c_double, c_float, c_int, c_long,
- c_longlong, c_short, c_uint, c_ulong, c_ulonglong, c_ushort, c_void_p, c_wchar, c_wchar_p,
- pointer,
-)
-
-BYTE = c_byte
-WORD = c_ushort
-DWORD = c_ulong
-CHAR = c_char
-WCHAR = c_wchar
-UINT = c_uint
-INT = c_int
-DOUBLE = c_double
-FLOAT = c_float
-BOOLEAN = BYTE
-BOOL = c_long
-class VARIANT_BOOL(_SimpleCData[bool]): ...
-ULONG = c_ulong
-LONG = c_long
-USHORT = c_ushort
-SHORT = c_short
-LARGE_INTEGER = c_longlong
-_LARGE_INTEGER = c_longlong
-ULARGE_INTEGER = c_ulonglong
-_ULARGE_INTEGER = c_ulonglong
-
-OLESTR = c_wchar_p
-LPOLESTR = c_wchar_p
-LPCOLESTR = c_wchar_p
-LPWSTR = c_wchar_p
-LPCWSTR = c_wchar_p
-LPSTR = c_char_p
-LPCSTR = c_char_p
-LPVOID = c_void_p
-LPCVOID = c_void_p
-
-# These two types are pointer-sized unsigned and signed ints, respectively.
-# At runtime, they are either c_[u]long or c_[u]longlong, depending on the host's pointer size
-# (they are not really separate classes).
-class WPARAM(_SimpleCData[int]): ...
-class LPARAM(_SimpleCData[int]): ...
-
-ATOM = WORD
-LANGID = WORD
-COLORREF = DWORD
-LGRPID = DWORD
-LCTYPE = DWORD
-LCID = DWORD
-
-HANDLE = c_void_p
-HACCEL = HANDLE
-HBITMAP = HANDLE
-HBRUSH = HANDLE
-HCOLORSPACE = HANDLE
-HDC = HANDLE
-HDESK = HANDLE
-HDWP = HANDLE
-HENHMETAFILE = HANDLE
-HFONT = HANDLE
-HGDIOBJ = HANDLE
-HGLOBAL = HANDLE
-HHOOK = HANDLE
-HICON = HANDLE
-HINSTANCE = HANDLE
-HKEY = HANDLE
-HKL = HANDLE
-HLOCAL = HANDLE
-HMENU = HANDLE
-HMETAFILE = HANDLE
-HMODULE = HANDLE
-HMONITOR = HANDLE
-HPALETTE = HANDLE
-HPEN = HANDLE
-HRGN = HANDLE
-HRSRC = HANDLE
-HSTR = HANDLE
-HTASK = HANDLE
-HWINSTA = HANDLE
-HWND = HANDLE
-SC_HANDLE = HANDLE
-SERVICE_STATUS_HANDLE = HANDLE
-
-class RECT(Structure):
- left: LONG
- top: LONG
- right: LONG
- bottom: LONG
-RECTL = RECT
-_RECTL = RECT
-tagRECT = RECT
-
-class _SMALL_RECT(Structure):
- Left: SHORT
- Top: SHORT
- Right: SHORT
- Bottom: SHORT
-SMALL_RECT = _SMALL_RECT
-
-class _COORD(Structure):
- X: SHORT
- Y: SHORT
-
-class POINT(Structure):
- x: LONG
- y: LONG
-POINTL = POINT
-_POINTL = POINT
-tagPOINT = POINT
-
-class SIZE(Structure):
- cx: LONG
- cy: LONG
-SIZEL = SIZE
-tagSIZE = SIZE
-
-def RGB(red: int, green: int, blue: int) -> int: ...
-
-class FILETIME(Structure):
- dwLowDateTime: DWORD
- dwHighDateTime: DWORD
-_FILETIME = FILETIME
-
-class MSG(Structure):
- hWnd: HWND
- message: UINT
- wParam: WPARAM
- lParam: LPARAM
- time: DWORD
- pt: POINT
-tagMSG = MSG
-MAX_PATH: int
-
-class WIN32_FIND_DATAA(Structure):
- dwFileAttributes: DWORD
- ftCreationTime: FILETIME
- ftLastAccessTime: FILETIME
- ftLastWriteTime: FILETIME
- nFileSizeHigh: DWORD
- nFileSizeLow: DWORD
- dwReserved0: DWORD
- dwReserved1: DWORD
- cFileName: Array[CHAR]
- cAlternateFileName: Array[CHAR]
-
-class WIN32_FIND_DATAW(Structure):
- dwFileAttributes: DWORD
- ftCreationTime: FILETIME
- ftLastAccessTime: FILETIME
- ftLastWriteTime: FILETIME
- nFileSizeHigh: DWORD
- nFileSizeLow: DWORD
- dwReserved0: DWORD
- dwReserved1: DWORD
- cFileName: Array[WCHAR]
- cAlternateFileName: Array[WCHAR]
-
-# These pointer type definitions use pointer[...] instead of POINTER(...), to allow them
-# to be used in type annotations.
-PBOOL = pointer[BOOL]
-LPBOOL = pointer[BOOL]
-PBOOLEAN = pointer[BOOLEAN]
-PBYTE = pointer[BYTE]
-LPBYTE = pointer[BYTE]
-PCHAR = pointer[CHAR]
-LPCOLORREF = pointer[COLORREF]
-PDWORD = pointer[DWORD]
-LPDWORD = pointer[DWORD]
-PFILETIME = pointer[FILETIME]
-LPFILETIME = pointer[FILETIME]
-PFLOAT = pointer[FLOAT]
-PHANDLE = pointer[HANDLE]
-LPHANDLE = pointer[HANDLE]
-PHKEY = pointer[HKEY]
-LPHKL = pointer[HKL]
-PINT = pointer[INT]
-LPINT = pointer[INT]
-PLARGE_INTEGER = pointer[LARGE_INTEGER]
-PLCID = pointer[LCID]
-PLONG = pointer[LONG]
-LPLONG = pointer[LONG]
-PMSG = pointer[MSG]
-LPMSG = pointer[MSG]
-PPOINT = pointer[POINT]
-LPPOINT = pointer[POINT]
-PPOINTL = pointer[POINTL]
-PRECT = pointer[RECT]
-LPRECT = pointer[RECT]
-PRECTL = pointer[RECTL]
-LPRECTL = pointer[RECTL]
-LPSC_HANDLE = pointer[SC_HANDLE]
-PSHORT = pointer[SHORT]
-PSIZE = pointer[SIZE]
-LPSIZE = pointer[SIZE]
-PSIZEL = pointer[SIZEL]
-LPSIZEL = pointer[SIZEL]
-PSMALL_RECT = pointer[SMALL_RECT]
-PUINT = pointer[UINT]
-LPUINT = pointer[UINT]
-PULARGE_INTEGER = pointer[ULARGE_INTEGER]
-PULONG = pointer[ULONG]
-PUSHORT = pointer[USHORT]
-PWCHAR = pointer[WCHAR]
-PWIN32_FIND_DATAA = pointer[WIN32_FIND_DATAA]
-LPWIN32_FIND_DATAA = pointer[WIN32_FIND_DATAA]
-PWIN32_FIND_DATAW = pointer[WIN32_FIND_DATAW]
-LPWIN32_FIND_DATAW = pointer[WIN32_FIND_DATAW]
-PWORD = pointer[WORD]
-LPWORD = pointer[WORD]
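
The wintypes stub is pure aliasing: Win32 typedefs mapped onto ctypes primitives, with `HANDLE = c_void_p` behind every handle flavor. A guarded sketch of typical use; only the Windows branch exercises it, since `ctypes.wintypes` has historically failed to import elsewhere:

```python
import sys

if sys.platform == "win32":
    import ctypes
    from ctypes import wintypes

    # GetTickCount() returns a DWORD (c_ulong) per the aliases above.
    kernel32 = ctypes.WinDLL("kernel32")
    kernel32.GetTickCount.restype = wintypes.DWORD
    print("ms since boot:", kernel32.GetTickCount())
else:
    print("ctypes.wintypes is Windows-only")
```
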
diff --git a/typeshed/2and3/distutils/__init__.pyi b/typeshed/2and3/distutils/__init__.pyi
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/typeshed/2and3/distutils/spawn.pyi b/typeshed/2and3/distutils/spawn.pyi
deleted file mode 100644
index 8df9ebab7f..0000000000
--- a/typeshed/2and3/distutils/spawn.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for distutils.spawn
-
-from typing import List, Optional
-
-def spawn(cmd: List[str], search_path: bool = ...,
- verbose: bool = ..., dry_run: bool = ...) -> None: ...
-def find_executable(executable: str,
- path: Optional[str] = ...) -> Optional[str]: ...
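
`distutils.spawn.find_executable` is a plain PATH lookup; distutils is deprecated (removed in Python 3.12), and `shutil.which` is the stdlib replacement that behaves equivalently for this purpose:

```python
import shutil
from distutils.spawn import find_executable  # deprecated; gone in 3.12

# Both resolve a command name against PATH, returning None if absent.
print(find_executable("python3"))
print(shutil.which("python3"))
```
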
diff --git a/typeshed/2and3/msvcrt.pyi b/typeshed/2and3/msvcrt.pyi
deleted file mode 100644
index bcab64cd9a..0000000000
--- a/typeshed/2and3/msvcrt.pyi
+++ /dev/null
@@ -1,8 +0,0 @@
-# Stubs for msvcrt
-
-# NOTE: These are incomplete!
-
-from typing import overload, BinaryIO, TextIO
-
-def get_osfhandle(file: int) -> int: ...
-def open_osfhandle(handle: int, flags: int) -> int: ...
diff --git a/typeshed/2and3/pathlib2.pyi b/typeshed/2and3/pathlib2.pyi
deleted file mode 100644
index 2e8a45a0da..0000000000
--- a/typeshed/2and3/pathlib2.pyi
+++ /dev/null
@@ -1,120 +0,0 @@
-from typing import Any, Generator, IO, Optional, Sequence, Text, Tuple, Type, TypeVar, Union, List
-from types import TracebackType
-import os
-import sys
-
-_P = TypeVar('_P', bound='PurePath')
-
-if sys.version_info >= (3, 6):
- _PurePathBase = os.PathLike[str]
-else:
- _PurePathBase = object
-
-class PurePath(_PurePathBase):
- parts: Tuple[str, ...]
- drive: str
- root: str
- anchor: str
- name: str
- suffix: str
- suffixes: List[str]
- stem: str
- if sys.version_info < (3, 6):
- def __new__(cls: Type[_P], *args: Union[Text, PurePath]) -> _P: ...
- else:
- def __new__(cls: Type[_P], *args: Union[Text, os.PathLike[str]]) -> _P: ...
- def __hash__(self) -> int: ...
- def __lt__(self, other: PurePath) -> bool: ...
- def __le__(self, other: PurePath) -> bool: ...
- def __gt__(self, other: PurePath) -> bool: ...
- def __ge__(self, other: PurePath) -> bool: ...
- def __truediv__(self: _P, key: Union[Text, PurePath]) -> _P: ...
- if sys.version_info < (3,):
- def __div__(self: _P, key: Union[Text, PurePath]) -> _P: ...
- def __bytes__(self) -> bytes: ...
- def as_posix(self) -> str: ...
- def as_uri(self) -> str: ...
- def is_absolute(self) -> bool: ...
- def is_reserved(self) -> bool: ...
- def match(self, path_pattern: str) -> bool: ...
- def relative_to(self: _P, *other: Union[str, PurePath]) -> _P: ...
- def with_name(self: _P, name: str) -> _P: ...
- def with_suffix(self: _P, suffix: str) -> _P: ...
- def joinpath(self: _P, *other: Union[str, PurePath]) -> _P: ...
-
- @property
- def parents(self: _P) -> Sequence[_P]: ...
- @property
- def parent(self: _P) -> _P: ...
-
-class PurePosixPath(PurePath): ...
-class PureWindowsPath(PurePath): ...
-
-class Path(PurePath):
- def __enter__(self) -> Path: ...
- def __exit__(self, exc_type: Optional[Type[BaseException]],
- exc_value: Optional[BaseException],
- traceback: Optional[TracebackType]) -> Optional[bool]: ...
- @classmethod
- def cwd(cls: Type[_P]) -> _P: ...
- def stat(self) -> os.stat_result: ...
- def chmod(self, mode: int) -> None: ...
- def exists(self) -> bool: ...
- def glob(self, pattern: str) -> Generator[Path, None, None]: ...
- def group(self) -> str: ...
- def is_dir(self) -> bool: ...
- def is_file(self) -> bool: ...
- def is_symlink(self) -> bool: ...
- def is_socket(self) -> bool: ...
- def is_fifo(self) -> bool: ...
- def is_block_device(self) -> bool: ...
- def is_char_device(self) -> bool: ...
- def iterdir(self) -> Generator[Path, None, None]: ...
- def lchmod(self, mode: int) -> None: ...
- def lstat(self) -> os.stat_result: ...
- if sys.version_info < (3, 5):
- def mkdir(self, mode: int = ...,
- parents: bool = ...) -> None: ...
- else:
- def mkdir(self, mode: int = ..., parents: bool = ...,
- exist_ok: bool = ...) -> None: ...
- def open(self, mode: str = ..., buffering: int = ...,
- encoding: Optional[str] = ..., errors: Optional[str] = ...,
- newline: Optional[str] = ...) -> IO[Any]: ...
- def owner(self) -> str: ...
- def rename(self, target: Union[str, PurePath]) -> None: ...
- def replace(self, target: Union[str, PurePath]) -> None: ...
- if sys.version_info < (3, 6):
- def resolve(self: _P) -> _P: ...
- else:
- def resolve(self: _P, strict: bool = ...) -> _P: ...
- def rglob(self, pattern: str) -> Generator[Path, None, None]: ...
- def rmdir(self) -> None: ...
- def symlink_to(self, target: Union[str, Path],
- target_is_directory: bool = ...) -> None: ...
- def touch(self, mode: int = ..., exist_ok: bool = ...) -> None: ...
- def unlink(self) -> None: ...
-
- if sys.version_info >= (3, 5):
- @classmethod
- def home(cls: Type[_P]) -> _P: ...
- if sys.version_info < (3, 6):
- def __new__(cls: Type[_P], *args: Union[str, PurePath],
- **kwargs: Any) -> _P: ...
- else:
- def __new__(cls: Type[_P], *args: Union[str, os.PathLike[str]],
- **kwargs: Any) -> _P: ...
-
- def absolute(self: _P) -> _P: ...
- def expanduser(self: _P) -> _P: ...
- def read_bytes(self) -> bytes: ...
- def read_text(self, encoding: Optional[str] = ...,
- errors: Optional[str] = ...) -> str: ...
- def samefile(self, other_path: Union[str, bytes, int, Path]) -> bool: ...
- def write_bytes(self, data: bytes) -> int: ...
- def write_text(self, data: str, encoding: Optional[str] = ...,
- errors: Optional[str] = ...) -> int: ...
-
-
-class PosixPath(Path, PurePosixPath): ...
-class WindowsPath(Path, PureWindowsPath): ...
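
pathlib2 was the PyPI backport of pathlib for pre-3.4 interpreters, stub-identical to the stdlib module; on the 3.6+ interpreters this project now targets, stdlib pathlib covers everything the stub declared. A brief sketch of that surface:

```python
from pathlib import Path  # stdlib replacement for the pathlib2 backport

p = Path("tests") / "wf" / "trick_defaults2.cwl"
print(p.name, p.suffix, p.stem)   # trick_defaults2.cwl .cwl trick_defaults2
print(p.with_suffix(".json"))     # tests/wf/trick_defaults2.json
print(p.parent.as_posix())        # tests/wf
print(p.exists())                 # True when run from the repository root
```
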
diff --git a/typeshed/2and3/select.pyi b/typeshed/2and3/select.pyi
deleted file mode 100644
index 094bb916ce..0000000000
--- a/typeshed/2and3/select.pyi
+++ /dev/null
@@ -1,137 +0,0 @@
-import sys
-from typing import Any, Optional, Sequence, Tuple, Iterable, List, Union
-
-# When we have protocols, this should change to a protocol with a fileno method
-# See https://docs.python.org/3/c-api/file.html#c.PyObject_AsFileDescriptor
-_FileDescriptor = Union[int, Any]
-
-EPOLLERR: int
-EPOLLET: int
-EPOLLHUP: int
-EPOLLIN: int
-EPOLLMSG: int
-EPOLLONESHOT: int
-EPOLLOUT: int
-EPOLLPRI: int
-EPOLLRDBAND: int
-EPOLLRDNORM: int
-EPOLLWRBAND: int
-EPOLLWRNORM: int
-EPOLL_RDHUP: int
-KQ_EV_ADD: int
-KQ_EV_CLEAR: int
-KQ_EV_DELETE: int
-KQ_EV_DISABLE: int
-KQ_EV_ENABLE: int
-KQ_EV_EOF: int
-KQ_EV_ERROR: int
-KQ_EV_FLAG1: int
-KQ_EV_ONESHOT: int
-KQ_EV_SYSFLAGS: int
-KQ_FILTER_AIO: int
-KQ_FILTER_NETDEV: int
-KQ_FILTER_PROC: int
-KQ_FILTER_READ: int
-KQ_FILTER_SIGNAL: int
-KQ_FILTER_TIMER: int
-KQ_FILTER_VNODE: int
-KQ_FILTER_WRITE: int
-KQ_NOTE_ATTRIB: int
-KQ_NOTE_CHILD: int
-KQ_NOTE_DELETE: int
-KQ_NOTE_EXEC: int
-KQ_NOTE_EXIT: int
-KQ_NOTE_EXTEND: int
-KQ_NOTE_FORK: int
-KQ_NOTE_LINK: int
-KQ_NOTE_LINKDOWN: int
-KQ_NOTE_LINKINV: int
-KQ_NOTE_LINKUP: int
-KQ_NOTE_LOWAT: int
-KQ_NOTE_PCTRLMASK: int
-KQ_NOTE_PDATAMASK: int
-KQ_NOTE_RENAME: int
-KQ_NOTE_REVOKE: int
-KQ_NOTE_TRACK: int
-KQ_NOTE_TRACKERR: int
-KQ_NOTE_WRITE: int
-PIPE_BUF: int
-POLLERR: int
-POLLHUP: int
-POLLIN: int
-POLLMSG: int
-POLLNVAL: int
-POLLOUT: int
-POLLPRI: int
-POLLRDBAND: int
-POLLRDNORM: int
-POLLWRBAND: int
-POLLWRNORM: int
-
-class poll:
- def __init__(self) -> None: ...
- def register(self, fd: _FileDescriptor, eventmask: int = ...) -> None: ...
- def modify(self, fd: _FileDescriptor, eventmask: int) -> None: ...
- def unregister(self, fd: _FileDescriptor) -> None: ...
- def poll(self, timeout: Optional[float] = ...) -> List[Tuple[int, int]]: ...
-
-def select(rlist: Sequence[Any], wlist: Sequence[Any], xlist: Sequence[Any],
- timeout: Optional[float] = ...) -> Tuple[List[Any],
- List[Any],
- List[Any]]: ...
-
-if sys.version_info >= (3, 3):
- error = OSError
-else:
- class error(Exception): ...
-
-# BSD only
-class kevent(object):
- data: Any
- fflags: int
- filter: int
- flags: int
- ident: int
- udata: Any
- def __init__(self, ident: _FileDescriptor, filter: int = ..., flags: int = ..., fflags: int = ..., data: Any = ..., udata: Any = ...) -> None: ...
-
-# BSD only
-class kqueue(object):
- closed: bool
- def __init__(self) -> None: ...
- def close(self) -> None: ...
- def control(self, changelist: Optional[Iterable[kevent]], max_events: int, timeout: float = ...) -> List[kevent]: ...
- def fileno(self) -> int: ...
- @classmethod
- def fromfd(cls, fd: _FileDescriptor) -> kqueue: ...
-
-# Linux only
-class epoll(object):
- if sys.version_info >= (3, 3):
- def __init__(self, sizehint: int = ..., flags: int = ...) -> None: ...
- else:
- def __init__(self, sizehint: int = ...) -> None: ...
- if sys.version_info >= (3, 4):
- def __enter__(self) -> epoll: ...
- def __exit__(self, *args: Any) -> None: ...
- def close(self) -> None: ...
- closed: bool
- def fileno(self) -> int: ...
- def register(self, fd: _FileDescriptor, eventmask: int = ...) -> None: ...
- def modify(self, fd: _FileDescriptor, eventmask: int) -> None: ...
- def unregister(self, fd: _FileDescriptor) -> None: ...
- def poll(self, timeout: float = ..., maxevents: int = ...) -> List[Tuple[int, int]]: ...
- @classmethod
- def fromfd(cls, fd: _FileDescriptor) -> epoll: ...
-
-if sys.version_info >= (3, 3):
- # Solaris only
- class devpoll:
- if sys.version_info >= (3, 4):
- def close(self) -> None: ...
- closed: bool
- def fileno(self) -> int: ...
- def register(self, fd: _FileDescriptor, eventmask: int = ...) -> None: ...
- def modify(self, fd: _FileDescriptor, eventmask: int = ...) -> None: ...
- def unregister(self, fd: _FileDescriptor) -> None: ...
- def poll(self, timeout: Optional[float] = ...) -> List[Tuple[int, int]]: ...
\ No newline at end of file
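
The select stub covered the portable `select()`/`poll()` calls plus the BSD (`kqueue`) and Linux (`epoll`) families. A minimal POSIX sketch of the common denominator, multiplexing over a pipe:

```python
import os
import select

# Make data available on the read end of a pipe.
r, w = os.pipe()
os.write(w, b"ping")

# select() blocks until a descriptor is ready; the timeout caps the wait.
readable, _, _ = select.select([r], [], [], 1.0)
assert readable == [r]
print(os.read(r, 4))  # b'ping'

os.close(r)
os.close(w)
```
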
diff --git a/typeshed/2and3/uuid.pyi b/typeshed/2and3/uuid.pyi
deleted file mode 100644
index 5828073c8e..0000000000
--- a/typeshed/2and3/uuid.pyi
+++ /dev/null
@@ -1,94 +0,0 @@
-# Stubs for uuid
-
-import sys
-from typing import Tuple, Optional, Any, Union
-
-# Because UUID has properties called int and bytes we need to rename these temporarily.
-_Int = int
-_Bytes = bytes
-_FieldsType = Tuple[int, int, int, int, int, int]
-if sys.version_info >= (3,):
- _Text = str
-else:
- _Text = Union[str, unicode]
-
-class UUID:
- def __init__(self, hex: Optional[_Text] = ...,
- bytes: Optional[_Bytes] = ...,
- bytes_le: Optional[_Bytes] = ...,
- fields: Optional[_FieldsType] = ...,
- int: Optional[_Int] = ...,
- version: Optional[_Int] = ...) -> None: ...
- @property
- def bytes(self) -> _Bytes: ...
- @property
- def bytes_le(self) -> _Bytes: ...
- @property
- def clock_seq(self) -> _Int: ...
- @property
- def clock_seq_hi_variant(self) -> _Int: ...
- @property
- def clock_seq_low(self) -> _Int: ...
- @property
- def fields(self) -> _FieldsType: ...
- @property
- def hex(self) -> str: ...
- @property
- def int(self) -> _Int: ...
- @property
- def node(self) -> _Int: ...
- @property
- def time(self) -> _Int: ...
- @property
- def time_hi_version(self) -> _Int: ...
- @property
- def time_low(self) -> _Int: ...
- @property
- def time_mid(self) -> _Int: ...
- @property
- def urn(self) -> str: ...
- @property
- def variant(self) -> str: ...
- @property
- def version(self) -> Optional[_Int]: ...
-
- def __int__(self) -> _Int: ...
-
- if sys.version_info >= (3,):
- def __eq__(self, other: Any) -> bool: ...
- def __lt__(self, other: Any) -> bool: ...
- def __le__(self, other: Any) -> bool: ...
- def __gt__(self, other: Any) -> bool: ...
- def __ge__(self, other: Any) -> bool: ...
- else:
- def get_bytes(self) -> _Bytes: ...
- def get_bytes_le(self) -> _Bytes: ...
- def get_clock_seq(self) -> _Int: ...
- def get_clock_seq_hi_variant(self) -> _Int: ...
- def get_clock_seq_low(self) -> _Int: ...
- def get_fields(self) -> _FieldsType: ...
- def get_hex(self) -> str: ...
- def get_node(self) -> _Int: ...
- def get_time(self) -> _Int: ...
- def get_time_hi_version(self) -> _Int: ...
- def get_time_low(self) -> _Int: ...
- def get_time_mid(self) -> _Int: ...
- def get_urn(self) -> str: ...
- def get_variant(self) -> str: ...
- def get_version(self) -> Optional[_Int]: ...
- def __cmp__(self, other: Any) -> _Int: ...
-
-def getnode() -> int: ...
-def uuid1(node: Optional[_Int] = ..., clock_seq: Optional[_Int] = ...) -> UUID: ...
-def uuid3(namespace: UUID, name: str) -> UUID: ...
-def uuid4() -> UUID: ...
-def uuid5(namespace: UUID, name: str) -> UUID: ...
-
-NAMESPACE_DNS = ... # type: UUID
-NAMESPACE_URL = ... # type: UUID
-NAMESPACE_OID = ... # type: UUID
-NAMESPACE_X500 = ... # type: UUID
-RESERVED_NCS = ... # type: str
-RFC_4122 = ... # type: str
-RESERVED_MICROSOFT = ... # type: str
-RESERVED_FUTURE = ... # type: str
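
The uuid stub's renamed `_Int`/`_Bytes` aliases exist only because UUID has properties that shadow the builtins. A short sketch of the API the stub declares, covering random, name-based, and round-trip construction:

```python
import uuid

u4 = uuid.uuid4()                                  # random, version 4
u5 = uuid.uuid5(uuid.NAMESPACE_URL,                # deterministic, name-based
                "https://www.commonwl.org/")

print(u4.hex, u4.version)        # 32 hex digits, 4
print(u5.urn)                    # urn:uuid:...
assert uuid.UUID(str(u5)) == u5  # round-trips through its string form
```
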
diff --git a/typeshed/3/logging/__init__.pyi b/typeshed/3/logging/__init__.pyi
deleted file mode 100644
index b104a2f102..0000000000
--- a/typeshed/3/logging/__init__.pyi
+++ /dev/null
@@ -1,561 +0,0 @@
-# Stubs for logging (Python 3.7)
-
-from typing import (
- Any, Callable, Dict, Iterable, List, Mapping, MutableMapping, Optional, IO,
- Tuple, Text, Union, overload,
-)
-from string import Template
-from time import struct_time
-from types import TracebackType, FrameType
-import sys
-import threading
-
-_SysExcInfoType = Union[Tuple[type, BaseException, Optional[TracebackType]],
- Tuple[None, None, None]]
-if sys.version_info >= (3, 5):
- _ExcInfoType = Union[None, bool, _SysExcInfoType, BaseException]
-else:
- _ExcInfoType = Union[None, bool, _SysExcInfoType]
-_ArgsType = Union[Tuple[Any, ...], Mapping[str, Any]]
-_FilterType = Union[Filter, Callable[[LogRecord], int]]
-_Level = Union[int, Text]
-if sys.version_info >= (3, 6):
- from os import PathLike
- _Path = Union[str, PathLike[str]]
-else:
- _Path = str
-
-raiseExceptions: bool
-logThreads: bool
-logMultiprocessing: bool
-logProcesses: bool
-
-def currentframe() -> FrameType: ...
-
-if sys.version_info >= (3,):
- _levelToName: Dict[int, str]
- _nameToLevel: Dict[str, int]
-else:
- _levelNames: Dict[Union[int, str], Union[str, int]] # Union[int:str, str:int]
-
-class Filterer(object):
- filters: List[Filter]
- def __init__(self) -> None: ...
- def addFilter(self, filter: Filter) -> None: ...
- def removeFilter(self, filter: Filter) -> None: ...
- def filter(self, record: LogRecord) -> bool: ...
-
-class Logger(Filterer):
- name: str
- level: int
- parent: Union[Logger, PlaceHolder]
- propagate: bool
- handlers: List[Handler]
- disabled: int
- def __init__(self, name: str, level: _Level = ...) -> None: ...
- def setLevel(self, level: _Level) -> None: ...
- def isEnabledFor(self, level: int) -> bool: ...
- def getEffectiveLevel(self) -> int: ...
- def getChild(self, suffix: str) -> Logger: ...
- if sys.version_info >= (3, 8):
- def debug(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def info(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warning(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warn(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def error(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def exception(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def critical(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def log(self, level: int, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def _log(
- self,
- level: int,
- msg: Any,
- args: _ArgsType,
- exc_info: Optional[_ExcInfoType] = ...,
- extra: Optional[Dict[str, Any]] = ...,
- stack_info: bool = ...,
- stacklevel: int = ...,
- ) -> None: ... # undocumented
- elif sys.version_info >= (3,):
- def debug(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def info(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warning(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warn(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def error(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def critical(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- fatal = critical
- def log(self, level: int, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def exception(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def _log(
- self,
- level: int,
- msg: Any,
- args: _ArgsType,
- exc_info: Optional[_ExcInfoType] = ...,
- extra: Optional[Dict[str, Any]] = ...,
- stack_info: bool = ...,
- ) -> None: ... # undocumented
- else:
- def debug(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def info(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def warning(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- warn = warning
- def error(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def critical(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- fatal = critical
- def log(self,
- level: int, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def exception(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def _log(
- self,
- level: int,
- msg: Any,
- args: _ArgsType,
- exc_info: Optional[_ExcInfoType] = ...,
- extra: Optional[Dict[str, Any]] = ...,
- ) -> None: ... # undocumented
- def addFilter(self, filt: _FilterType) -> None: ...
- def removeFilter(self, filt: _FilterType) -> None: ...
- def filter(self, record: LogRecord) -> bool: ...
- def addHandler(self, hdlr: Handler) -> None: ...
- def removeHandler(self, hdlr: Handler) -> None: ...
- if sys.version_info >= (3, 8):
- def findCaller(self, stack_info: bool = ..., stacklevel: int = ...) -> Tuple[str, int, str, Optional[str]]: ...
- elif sys.version_info >= (3,):
- def findCaller(self, stack_info: bool = ...) -> Tuple[str, int, str, Optional[str]]: ...
- else:
- def findCaller(self) -> Tuple[str, int, str]: ...
- def handle(self, record: LogRecord) -> None: ...
- if sys.version_info >= (3,):
- def makeRecord(self, name: str, level: int, fn: str, lno: int, msg: Any,
- args: _ArgsType,
- exc_info: Optional[_SysExcInfoType],
- func: Optional[str] = ...,
- extra: Optional[Mapping[str, Any]] = ...,
- sinfo: Optional[str] = ...) -> LogRecord: ...
- else:
- def makeRecord(self,
- name: str, level: int, fn: str, lno: int, msg: Any,
- args: _ArgsType,
- exc_info: Optional[_SysExcInfoType],
- func: Optional[str] = ...,
- extra: Optional[Mapping[str, Any]] = ...) -> LogRecord: ...
- if sys.version_info >= (3,):
- def hasHandlers(self) -> bool: ...
-
-
-CRITICAL: int
-FATAL: int
-ERROR: int
-WARNING: int
-WARN: int
-INFO: int
-DEBUG: int
-NOTSET: int
-
-
-class Handler(Filterer):
- level: int # undocumented
- formatter: Optional[Formatter] # undocumented
- lock: Optional[threading.Lock] # undocumented
- name: Optional[str] # undocumented
- def __init__(self, level: _Level = ...) -> None: ...
- def createLock(self) -> None: ...
- def acquire(self) -> None: ...
- def release(self) -> None: ...
- def setLevel(self, level: _Level) -> None: ...
- def setFormatter(self, fmt: Formatter) -> None: ...
- def addFilter(self, filt: _FilterType) -> None: ...
- def removeFilter(self, filt: _FilterType) -> None: ...
- def filter(self, record: LogRecord) -> bool: ...
- def flush(self) -> None: ...
- def close(self) -> None: ...
- def handle(self, record: LogRecord) -> None: ...
- def handleError(self, record: LogRecord) -> None: ...
- def format(self, record: LogRecord) -> str: ...
- def emit(self, record: LogRecord) -> None: ...
-
-
-class Formatter:
- converter: Callable[[Optional[float]], struct_time]
- _fmt: Optional[str]
- datefmt: Optional[str]
- if sys.version_info >= (3,):
- _style: PercentStyle
- default_time_format: str
- default_msec_format: str
-
- if sys.version_info >= (3, 8):
- def __init__(self, fmt: Optional[str] = ...,
- datefmt: Optional[str] = ...,
- style: str = ..., validate: bool = ...) -> None: ...
- elif sys.version_info >= (3,):
- def __init__(self, fmt: Optional[str] = ...,
- datefmt: Optional[str] = ...,
- style: str = ...) -> None: ...
- else:
- def __init__(self,
- fmt: Optional[str] = ...,
- datefmt: Optional[str] = ...) -> None: ...
-
- def format(self, record: LogRecord) -> str: ...
- def formatTime(self, record: LogRecord, datefmt: Optional[str] = ...) -> str: ...
- def formatException(self, exc_info: _SysExcInfoType) -> str: ...
- if sys.version_info >= (3,):
- def formatMessage(self, record: LogRecord) -> str: ... # undocumented
- def formatStack(self, stack_info: str) -> str: ...
-
-
-class Filter:
- def __init__(self, name: str = ...) -> None: ...
- def filter(self, record: LogRecord) -> int: ...
-
-
-class LogRecord:
- args: _ArgsType
- asctime: str
- created: float
- exc_info: Optional[_SysExcInfoType]
- exc_text: Optional[str]
- filename: str
- funcName: str
- levelname: str
- levelno: int
- lineno: int
- module: str
- msecs: float
- message: str
- msg: str
- name: str
- pathname: str
- process: int
- processName: str
- relativeCreated: int
- if sys.version_info >= (3,):
- stack_info: Optional[str]
- thread: int
- threadName: str
- if sys.version_info >= (3,):
- def __init__(self, name: str, level: int, pathname: str, lineno: int,
- msg: Any, args: _ArgsType,
- exc_info: Optional[_SysExcInfoType],
- func: Optional[str] = ...,
- sinfo: Optional[str] = ...) -> None: ...
- else:
- def __init__(self,
- name: str, level: int, pathname: str, lineno: int,
- msg: Any, args: _ArgsType,
- exc_info: Optional[_SysExcInfoType],
- func: Optional[str] = ...) -> None: ...
- def getMessage(self) -> str: ...
-
-
-class LoggerAdapter:
- logger: Logger
- extra: Mapping[str, Any]
- def __init__(self, logger: Logger, extra: Mapping[str, Any]) -> None: ...
- def process(self, msg: Any, kwargs: MutableMapping[str, Any]) -> Tuple[Any, MutableMapping[str, Any]]: ...
- if sys.version_info >= (3, 8):
- def debug(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def info(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warning(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warn(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def error(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def exception(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def critical(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def log(self, level: int, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- elif sys.version_info >= (3,):
- def debug(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def info(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warning(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warn(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def error(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def exception(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def critical(self, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def log(self, level: int, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- else:
- def debug(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def info(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def warning(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def error(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def exception(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def critical(self,
- msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def log(self,
- level: int, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def isEnabledFor(self, lvl: int) -> bool: ...
- if sys.version_info >= (3,):
- def getEffectiveLevel(self) -> int: ...
- def setLevel(self, lvl: Union[int, str]) -> None: ...
- def hasHandlers(self) -> bool: ...
- if sys.version_info >= (3, 6):
- def _log(
- self,
- level: int,
- msg: Any,
- args: _ArgsType,
- exc_info: Optional[_ExcInfoType] = ...,
- extra: Optional[Dict[str, Any]] = ...,
- stack_info: bool = ...,
- ) -> None: ... # undocumented
-
-if sys.version_info >= (3,):
- def getLogger(name: Optional[str] = ...) -> Logger: ...
-else:
- @overload
- def getLogger() -> Logger: ...
- @overload
- def getLogger(name: Union[Text, str]) -> Logger: ...
-def getLoggerClass() -> type: ...
-if sys.version_info >= (3,):
- def getLogRecordFactory() -> Callable[..., LogRecord]: ...
-
-if sys.version_info >= (3, 8):
- def debug(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def info(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warning(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warn(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def error(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def critical(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def exception(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def log(level: int, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., stacklevel: int = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
-elif sys.version_info >= (3,):
- def debug(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def info(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warning(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def warn(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def error(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def critical(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def exception(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
- def log(level: int, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- stack_info: bool = ..., extra: Optional[Dict[str, Any]] = ...,
- **kwargs: Any) -> None: ...
-else:
- def debug(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def info(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def warning(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- warn = warning
- def error(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def critical(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def exception(msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
- def log(level: int, msg: Any, *args: Any, exc_info: _ExcInfoType = ...,
- extra: Optional[Dict[str, Any]] = ..., **kwargs: Any) -> None: ...
-fatal = critical
-
-def disable(lvl: int) -> None: ...
-def addLevelName(lvl: int, levelName: str) -> None: ...
-def getLevelName(lvl: Union[int, str]) -> Any: ...
-
-def makeLogRecord(attrdict: Mapping[str, Any]) -> LogRecord: ...
-
-if sys.version_info >= (3, 8):
- def basicConfig(*, filename: Optional[_Path] = ..., filemode: str = ...,
- format: str = ..., datefmt: Optional[str] = ..., style: str = ...,
- level: Optional[_Level] = ..., stream: Optional[IO[str]] = ...,
- handlers: Optional[Iterable[Handler]] = ..., force: bool = ...) -> None: ...
-elif sys.version_info >= (3,):
- def basicConfig(*, filename: Optional[_Path] = ..., filemode: str = ...,
- format: str = ..., datefmt: Optional[str] = ..., style: str = ...,
- level: Optional[_Level] = ..., stream: Optional[IO[str]] = ...,
- handlers: Optional[Iterable[Handler]] = ...) -> None: ...
-else:
- @overload
- def basicConfig() -> None: ...
- @overload
- def basicConfig(*, filename: Optional[str] = ..., filemode: str = ...,
- format: str = ..., datefmt: Optional[str] = ...,
- level: Optional[_Level] = ..., stream: IO[str] = ...) -> None: ...
-def shutdown() -> None: ...
-
-def setLoggerClass(klass: type) -> None: ...
-
-def captureWarnings(capture: bool) -> None: ...
-
-if sys.version_info >= (3,):
- def setLogRecordFactory(factory: Callable[..., LogRecord]) -> None: ...
-
-
-if sys.version_info >= (3,):
- lastResort: Optional[StreamHandler]
-
-
-class StreamHandler(Handler):
- stream: IO[str] # undocumented
- if sys.version_info >= (3, 2):
- terminator: str
- def __init__(self, stream: Optional[IO[str]] = ...) -> None: ...
- if sys.version_info >= (3, 7):
- def setStream(self, stream: IO[str]) -> Optional[IO[str]]: ...
-
-
-class FileHandler(StreamHandler):
- baseFilename: str # undocumented
- mode: str # undocumented
- encoding: Optional[str] # undocumented
- delay: bool # undocumented
- def __init__(self, filename: _Path, mode: str = ...,
- encoding: Optional[str] = ..., delay: bool = ...) -> None: ...
- def _open(self) -> IO[Any]: ...
-
-
-class NullHandler(Handler): ...
-
-
-class PlaceHolder:
- def __init__(self, alogger: Logger) -> None: ...
- def append(self, alogger: Logger) -> None: ...
-
-
-# Below aren't in module docs but still visible
-
-class RootLogger(Logger): ...
-
-root: RootLogger
-
-
-if sys.version_info >= (3,):
- class PercentStyle(object):
- default_format: str
- asctime_format: str
- asctime_search: str
- _fmt: str
-
- def __init__(self, fmt: str) -> None: ...
- def usesTime(self) -> bool: ...
- def format(self, record: Any) -> str: ...
-
- class StrFormatStyle(PercentStyle):
- ...
-
- class StringTemplateStyle(PercentStyle):
- _tpl: Template
-
- _STYLES: Dict[str, Tuple[PercentStyle, str]]
-
-
-BASIC_FORMAT: str
diff --git a/typeshed/3/py/__init__.pyi b/typeshed/3/py/__init__.pyi
deleted file mode 100644
index 27b3076a7a..0000000000
--- a/typeshed/3/py/__init__.pyi
+++ /dev/null
@@ -1,13 +0,0 @@
-# Names in __all__ with no definition:
-# _pydir
-# builtin
-# code
-# iniconfig
-# io
-# log
-# path
-# process
-# std
-# test
-# version
-# xml
diff --git a/typeshed/3/py/__metainfo.pyi b/typeshed/3/py/__metainfo.pyi
deleted file mode 100644
index e6315ea2c8..0000000000
--- a/typeshed/3/py/__metainfo.pyi
+++ /dev/null
@@ -1,3 +0,0 @@
-from typing import Any
-
-pydir: Any
diff --git a/typeshed/3/py/_builtin.pyi b/typeshed/3/py/_builtin.pyi
deleted file mode 100644
index aedc6c5725..0000000000
--- a/typeshed/3/py/_builtin.pyi
+++ /dev/null
@@ -1,16 +0,0 @@
-from typing import Any, Optional
-
-BaseException = BaseException
-GeneratorExit = GeneratorExit
-all = all
-any = any
-callable = callable
-enumerate = enumerate
-reversed = reversed
-set: Any
-frozenset: Any
-sorted = sorted
-text = str
-bytes = bytes
-
-def execfile(fn: Any, globs: Optional[Any] = ..., locs: Optional[Any] = ...) -> None: ...
diff --git a/typeshed/3/py/_code/__init__.pyi b/typeshed/3/py/_code/__init__.pyi
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/typeshed/3/py/_code/_assertionnew.pyi b/typeshed/3/py/_code/_assertionnew.pyi
deleted file mode 100644
index 9c4e0a76b8..0000000000
--- a/typeshed/3/py/_code/_assertionnew.pyi
+++ /dev/null
@@ -1,31 +0,0 @@
-import ast
-from py._code.assertion import BuiltinAssertionError as BuiltinAssertionError
-from typing import Any, Optional
-
-class Failure(Exception):
- cause: Any = ...
- explanation: Any = ...
- def __init__(self, explanation: str = ...) -> None: ...
-
-def interpret(source: Any, frame: Any, should_fail: bool = ...): ...
-def run(offending_line: Any, frame: Optional[Any] = ...): ...
-def getfailure(failure: Any): ...
-
-operator_map: Any
-unary_map: Any
-
-class DebugInterpreter(ast.NodeVisitor):
- frame: Any = ...
- def __init__(self, frame: Any) -> None: ...
- def generic_visit(self, node: Any): ...
- def visit_Expr(self, expr: Any): ...
- def visit_Module(self, mod: Any) -> None: ...
- def visit_Name(self, name: Any): ...
- def visit_Compare(self, comp: Any): ...
- def visit_BoolOp(self, boolop: Any): ...
- def visit_UnaryOp(self, unary: Any): ...
- def visit_BinOp(self, binop: Any): ...
- def visit_Call(self, call: Any): ...
- def visit_Attribute(self, attr: Any): ...
- def visit_Assert(self, assrt: Any): ...
- def visit_Assign(self, assign: Any): ...
diff --git a/typeshed/3/py/_code/_assertionold.pyi b/typeshed/3/py/_code/_assertionold.pyi
deleted file mode 100644
index a60491e6ae..0000000000
--- a/typeshed/3/py/_code/_assertionold.pyi
+++ /dev/null
@@ -1,110 +0,0 @@
-from py._code.assertion import BuiltinAssertionError as BuiltinAssertionError
-from typing import Any, Optional
-
-passthroughex: Any
-
-class Failure:
- node: Any = ...
- def __init__(self, node: Any) -> None: ...
-
-class View:
- __view__: Any = ...
- __obj__: Any = ...
- __rootclass__: Any = ...
- __class__: Any = ...
- def __new__(rootclass: Any, obj: Any, *args: Any, **kwds: Any): ...
- def __getattr__(self, attr: Any): ...
- def __viewkey__(self): ...
- def __matchkey__(self, key: Any, subclasses: Any): ...
-
-def enumsubclasses(cls) -> None: ...
-
-class Interpretable(View):
- explanation: Any = ...
- def is_builtin(self, frame: Any): ...
- result: Any = ...
- def eval(self, frame: Any) -> None: ...
- def run(self, frame: Any) -> None: ...
- def nice_explanation(self): ...
-
-class Name(Interpretable):
- __view__: Any = ...
- def is_local(self, frame: Any): ...
- def is_global(self, frame: Any): ...
- def is_builtin(self, frame: Any): ...
- explanation: Any = ...
- def eval(self, frame: Any) -> None: ...
-
-class Compare(Interpretable):
- __view__: Any = ...
- explanation: Any = ...
- result: Any = ...
- def eval(self, frame: Any) -> None: ...
-
-class And(Interpretable):
- __view__: Any = ...
- result: Any = ...
- explanation: Any = ...
- def eval(self, frame: Any) -> None: ...
-
-class Or(Interpretable):
- __view__: Any = ...
- result: Any = ...
- explanation: Any = ...
- def eval(self, frame: Any) -> None: ...
-
-keepalive: Any
-
-class UnaryArith(Interpretable):
- __view__: Any = ...
- explanation: Any = ...
- result: Any = ...
- def eval(self, frame: Any, astpattern: Any = ...) -> None: ...
-
-class BinaryArith(Interpretable):
- __view__: Any = ...
- explanation: Any = ...
- result: Any = ...
- def eval(self, frame: Any, astpattern: Any = ...) -> None: ...
-
-class CallFunc(Interpretable):
- __view__: Any = ...
- def is_bool(self, frame: Any): ...
- explanation: Any = ...
- result: Any = ...
- def eval(self, frame: Any) -> None: ...
-
-class Getattr(Interpretable):
- __view__: Any = ...
- result: Any = ...
- explanation: Any = ...
- def eval(self, frame: Any) -> None: ...
-
-class Assert(Interpretable):
- __view__: Any = ...
- result: Any = ...
- explanation: Any = ...
- def run(self, frame: Any) -> None: ...
-
-class Assign(Interpretable):
- __view__: Any = ...
- result: Any = ...
- explanation: Any = ...
- def run(self, frame: Any) -> None: ...
-
-class Discard(Interpretable):
- __view__: Any = ...
- result: Any = ...
- explanation: Any = ...
- def run(self, frame: Any) -> None: ...
-
-class Stmt(Interpretable):
- __view__: Any = ...
- def run(self, frame: Any) -> None: ...
-
-def report_failure(e: Any) -> None: ...
-def check(s: Any, frame: Optional[Any] = ...) -> None: ...
-def interpret(source: Any, frame: Any, should_fail: bool = ...): ...
-def getmsg(excinfo: Any): ...
-def getfailure(e: Any): ...
-def run(s: Any, frame: Optional[Any] = ...) -> None: ...
diff --git a/typeshed/3/py/_code/_py2traceback.pyi b/typeshed/3/py/_code/_py2traceback.pyi
deleted file mode 100644
index c2e4595fda..0000000000
--- a/typeshed/3/py/_code/_py2traceback.pyi
+++ /dev/null
@@ -1,3 +0,0 @@
-from typing import Any
-
-def format_exception_only(etype: Any, value: Any): ...
diff --git a/typeshed/3/py/_code/assertion.pyi b/typeshed/3/py/_code/assertion.pyi
deleted file mode 100644
index d81fc60978..0000000000
--- a/typeshed/3/py/_code/assertion.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-from typing import Any
-
-BuiltinAssertionError: Any
-
-class AssertionError(BuiltinAssertionError):
- msg: Any = ...
- args: Any = ...
- def __init__(self, *args: Any) -> None: ...
-
-reinterpret_old: str
diff --git a/typeshed/3/py/_code/code.pyi b/typeshed/3/py/_code/code.pyi
deleted file mode 100644
index c2387d6dc6..0000000000
--- a/typeshed/3/py/_code/code.pyi
+++ /dev/null
@@ -1,162 +0,0 @@
-from typing import Any, Optional
-
-builtin_repr = repr
-reprlib: Any
-
-class Code:
- filename: Any = ...
- firstlineno: Any = ...
- name: Any = ...
- raw: Any = ...
- def __init__(self, rawcode: Any) -> None: ...
- def __eq__(self, other: Any) -> Any: ...
- def __ne__(self, other: Any) -> Any: ...
- @property
- def path(self): ...
- @property
- def fullsource(self): ...
- def source(self): ...
- def getargs(self, var: bool = ...): ...
-
-class Frame:
- lineno: Any = ...
- f_globals: Any = ...
- f_locals: Any = ...
- raw: Any = ...
- code: Any = ...
- def __init__(self, frame: Any) -> None: ...
- @property
- def statement(self): ...
- def eval(self, code: Any, **vars: Any): ...
- def exec_(self, code: Any, **vars: Any) -> None: ...
- def repr(self, object: Any): ...
- def is_true(self, object: Any): ...
- def getargs(self, var: bool = ...): ...
-
-class TracebackEntry:
- exprinfo: Any = ...
- lineno: Any = ...
- def __init__(self, rawentry: Any) -> None: ...
- def set_repr_style(self, mode: Any) -> None: ...
- @property
- def frame(self): ...
- @property
- def relline(self): ...
- @property
- def statement(self): ...
- @property
- def path(self): ...
- def getlocals(self): ...
- locals: Any = ...
- def reinterpret(self): ...
- def getfirstlinesource(self): ...
- def getsource(self, astcache: Optional[Any] = ...): ...
- source: Any = ...
- def ishidden(self): ...
- def name(self): ...
- name: Any = ...
-
-class Traceback(list):
- Entry: Any = ...
- def __init__(self, tb: Any) -> None: ...
- def cut(self, path: Optional[Any] = ..., lineno: Optional[Any] = ..., firstlineno: Optional[Any] = ..., excludepath: Optional[Any] = ...): ...
- def __getitem__(self, key: Any): ...
- def filter(self, fn: Any = ...): ...
- def getcrashentry(self): ...
- def recursionindex(self): ...
-
-co_equal: Any
-
-class ExceptionInfo:
- type: Any = ...
- value: Any = ...
- tb: Any = ...
- typename: Any = ...
- traceback: Any = ...
- def __init__(self, tup: Optional[Any] = ..., exprinfo: Optional[Any] = ...) -> None: ...
- def exconly(self, tryshort: bool = ...): ...
- def errisinstance(self, exc: Any): ...
- def getrepr(self, showlocals: bool = ..., style: str = ..., abspath: bool = ..., tbfilter: bool = ..., funcargs: bool = ...): ...
- def __unicode__(self): ...
-
-class FormattedExcinfo:
- flow_marker: str = ...
- fail_marker: str = ...
- showlocals: Any = ...
- style: Any = ...
- tbfilter: Any = ...
- funcargs: Any = ...
- abspath: Any = ...
- astcache: Any = ...
- def __init__(self, showlocals: bool = ..., style: str = ..., abspath: bool = ..., tbfilter: bool = ..., funcargs: bool = ...) -> None: ...
- def repr_args(self, entry: Any): ...
- def get_source(self, source: Any, line_index: int = ..., excinfo: Optional[Any] = ..., short: bool = ...): ...
- def get_exconly(self, excinfo: Any, indent: int = ..., markall: bool = ...): ...
- def repr_locals(self, locals: Any): ...
- def repr_traceback_entry(self, entry: Any, excinfo: Optional[Any] = ...): ...
- def repr_traceback(self, excinfo: Any): ...
- def repr_excinfo(self, excinfo: Any): ...
-
-class TerminalRepr:
- def __unicode__(self): ...
-
-class ReprExceptionInfo(TerminalRepr):
- reprtraceback: Any = ...
- reprcrash: Any = ...
- sections: Any = ...
- def __init__(self, reprtraceback: Any, reprcrash: Any) -> None: ...
- def addsection(self, name: Any, content: Any, sep: str = ...) -> None: ...
- def toterminal(self, tw: Any) -> None: ...
-
-class ReprTraceback(TerminalRepr):
- entrysep: str = ...
- reprentries: Any = ...
- extraline: Any = ...
- style: Any = ...
- def __init__(self, reprentries: Any, extraline: Any, style: Any) -> None: ...
- def toterminal(self, tw: Any) -> None: ...
-
-class ReprTracebackNative(ReprTraceback):
- style: str = ...
- reprentries: Any = ...
- extraline: Any = ...
- def __init__(self, tblines: Any) -> None: ...
-
-class ReprEntryNative(TerminalRepr):
- style: str = ...
- lines: Any = ...
- def __init__(self, tblines: Any) -> None: ...
- def toterminal(self, tw: Any) -> None: ...
-
-class ReprEntry(TerminalRepr):
- localssep: str = ...
- lines: Any = ...
- reprfuncargs: Any = ...
- reprlocals: Any = ...
- reprfileloc: Any = ...
- style: Any = ...
- def __init__(self, lines: Any, reprfuncargs: Any, reprlocals: Any, filelocrepr: Any, style: Any) -> None: ...
- def toterminal(self, tw: Any) -> None: ...
-
-class ReprFileLocation(TerminalRepr):
- path: Any = ...
- lineno: Any = ...
- message: Any = ...
- def __init__(self, path: Any, lineno: Any, message: Any) -> None: ...
- def toterminal(self, tw: Any) -> None: ...
-
-class ReprLocals(TerminalRepr):
- lines: Any = ...
- def __init__(self, lines: Any) -> None: ...
- def toterminal(self, tw: Any) -> None: ...
-
-class ReprFuncArgs(TerminalRepr):
- args: Any = ...
- def __init__(self, args: Any) -> None: ...
- def toterminal(self, tw: Any) -> None: ...
-
-oldbuiltins: Any
-
-def patch_builtins(assertion: bool = ..., compile: bool = ...) -> None: ...
-def unpatch_builtins(assertion: bool = ..., compile: bool = ...) -> None: ...
-def getrawcode(obj: Any, trycall: bool = ...): ...
diff --git a/typeshed/3/py/_code/source.pyi b/typeshed/3/py/_code/source.pyi
deleted file mode 100644
index b432c3990e..0000000000
--- a/typeshed/3/py/_code/source.pyi
+++ /dev/null
@@ -1,29 +0,0 @@
-from types import ModuleType as ModuleType
-from typing import Any, Optional
-
-cpy_compile = compile
-
-class Source:
- lines: Any = ...
- def __init__(self, *parts: Any, **kwargs: Any) -> None: ...
- def __eq__(self, other: Any) -> Any: ...
- def __getitem__(self, key: Any): ...
- def __len__(self): ...
- def __getslice__(self, start: Any, end: Any): ...
- def strip(self): ...
- def putaround(self, before: str = ..., after: str = ..., indent: Any = ...): ...
- def indent(self, indent: Any = ...): ...
- def getstatement(self, lineno: Any, assertion: bool = ...): ...
- def getstatementrange(self, lineno: Any, assertion: bool = ...): ...
- def deindent(self, offset: Optional[Any] = ...): ...
- def isparseable(self, deindent: bool = ...): ...
- def compile(self, filename: Optional[Any] = ..., mode: str = ..., flag: Any = ..., dont_inherit: int = ..., _genframe: Optional[Any] = ...): ...
-
-def compile_(source: Any, filename: Optional[Any] = ..., mode: str = ..., flags: Any = ..., dont_inherit: int = ...): ...
-def getfslineno(obj: Any): ...
-def findsource(obj: Any): ...
-def getsource(obj: Any, **kwargs: Any): ...
-def deindent(lines: Any, offset: Optional[Any] = ...): ...
-def get_statement_startend2(lineno: Any, node: Any): ...
-def getstatementrange_ast(lineno: Any, source: Any, assertion: bool = ..., astnode: Optional[Any] = ...): ...
-def getstatementrange_old(lineno: Any, source: Any, assertion: bool = ...): ...
diff --git a/typeshed/3/py/_error.pyi b/typeshed/3/py/_error.pyi
deleted file mode 100644
index 0b9451fd46..0000000000
--- a/typeshed/3/py/_error.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-from types import ModuleType
-from typing import Any
-
-class Error(EnvironmentError): ...
-
-class ErrorMaker(ModuleType):
- Error: Any = ...
- def __getattr__(self, name: Any): ...
- def checked_call(self, func: Any, *args: Any, **kwargs: Any): ...
-
-error: Any
diff --git a/typeshed/3/py/_io/__init__.pyi b/typeshed/3/py/_io/__init__.pyi
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/typeshed/3/py/_io/capture.pyi b/typeshed/3/py/_io/capture.pyi
deleted file mode 100644
index 12e02600f5..0000000000
--- a/typeshed/3/py/_io/capture.pyi
+++ /dev/null
@@ -1,61 +0,0 @@
-from StringIO import StringIO
-from io import BytesIO as BytesIO
-from typing import Any, Optional
-
-TextIO = StringIO
-
-class BytesIO(StringIO):
- def write(self, data: Any) -> None: ...
-
-patchsysdict: Any
-
-class FDCapture:
- targetfd: Any = ...
- tmpfile: Any = ...
- def __init__(self, targetfd: Any, tmpfile: Optional[Any] = ..., now: bool = ..., patchsys: bool = ...) -> None: ...
- def start(self) -> None: ...
- def done(self): ...
- def writeorg(self, data: Any) -> None: ...
-
-def dupfile(f: Any, mode: Optional[Any] = ..., buffering: int = ..., raising: bool = ..., encoding: Optional[Any] = ...): ...
-
-class EncodedFile:
- encoding: Any = ...
- def __init__(self, _stream: Any, encoding: Any) -> None: ...
- def write(self, obj: Any) -> None: ...
- def writelines(self, linelist: Any) -> None: ...
- def __getattr__(self, name: Any): ...
-
-class Capture:
- def call(cls, func: Any, *args: Any, **kwargs: Any): ...
- call: Any = ...
- def reset(self): ...
- def suspend(self): ...
-
-class StdCaptureFD(Capture):
- def __init__(self, out: bool = ..., err: bool = ..., mixed: bool = ..., in_: bool = ..., patchsys: bool = ..., now: bool = ...) -> None: ...
- def startall(self) -> None: ...
- def resume(self) -> None: ...
- def done(self, save: bool = ...): ...
- def readouterr(self): ...
-
-class StdCapture(Capture):
- out: Any = ...
- err: Any = ...
- in_: Any = ...
- def __init__(self, out: bool = ..., err: bool = ..., in_: bool = ..., mixed: bool = ..., now: bool = ...) -> None: ...
- def startall(self) -> None: ...
- def done(self, save: bool = ...): ...
- def resume(self) -> None: ...
- def readouterr(self): ...
-
-class DontReadFromInput:
- def read(self, *args: Any) -> None: ...
- readline: Any = ...
- readlines: Any = ...
- __iter__: Any = ...
- def fileno(self) -> None: ...
- def isatty(self): ...
- def close(self) -> None: ...
-
-devnullpath: Any
diff --git a/typeshed/3/py/_io/saferepr.pyi b/typeshed/3/py/_io/saferepr.pyi
deleted file mode 100644
index 1d305001d2..0000000000
--- a/typeshed/3/py/_io/saferepr.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-from typing import Any
-
-builtin_repr = repr
-reprlib: Any
-
-class SafeRepr(reprlib.Repr):
- def repr(self, x: Any): ...
- def repr_unicode(self, x: Any, level: Any): ...
- def repr_instance(self, x: Any, level: Any): ...
-
-def saferepr(obj: Any, maxsize: int = ...): ...
diff --git a/typeshed/3/py/_io/terminalwriter.pyi b/typeshed/3/py/_io/terminalwriter.pyi
deleted file mode 100644
index 23dc0e2eea..0000000000
--- a/typeshed/3/py/_io/terminalwriter.pyi
+++ /dev/null
@@ -1,72 +0,0 @@
-import ctypes
-from py.builtin import bytes as bytes, text as text
-from typing import Any, Optional
-
-py3k: Any
-py33: Any
-win32_and_ctypes: bool
-colorama: Any
-
-def get_terminal_width(): ...
-
-terminal_width: Any
-char_width: Any
-
-def get_line_width(text: Any): ...
-def ansi_print(text: Any, esc: Any, file: Optional[Any] = ..., newline: bool = ..., flush: bool = ...) -> None: ...
-def should_do_markup(file: Any): ...
-
-class TerminalWriter:
- stringio: Any = ...
- encoding: Any = ...
- hasmarkup: Any = ...
- def __init__(self, file: Optional[Any] = ..., stringio: bool = ..., encoding: Optional[Any] = ...) -> None: ...
- @property
- def fullwidth(self): ...
- @fullwidth.setter
- def fullwidth(self, value: Any) -> None: ...
- @property
- def chars_on_current_line(self): ...
- @property
- def width_of_current_line(self): ...
- def markup(self, text: Any, **kw: Any): ...
- def sep(self, sepchar: Any, title: Optional[Any] = ..., fullwidth: Optional[Any] = ..., **kw: Any) -> None: ...
- def write(self, msg: Any, **kw: Any) -> None: ...
- def line(self, s: str = ..., **kw: Any) -> None: ...
- def reline(self, line: Any, **kw: Any) -> None: ...
-
-class Win32ConsoleWriter(TerminalWriter):
- def write(self, msg: Any, **kw: Any) -> None: ...
-
-class WriteFile:
- encoding: Any = ...
- def __init__(self, writemethod: Any, encoding: Optional[Any] = ...) -> None: ...
- def write(self, data: Any) -> None: ...
- def flush(self) -> None: ...
-TerminalWriter = Win32ConsoleWriter
-STD_OUTPUT_HANDLE: int
-STD_ERROR_HANDLE: int
-FOREGROUND_BLACK: int
-FOREGROUND_BLUE: int
-FOREGROUND_GREEN: int
-FOREGROUND_RED: int
-FOREGROUND_WHITE: int
-FOREGROUND_INTENSITY: int
-BACKGROUND_BLACK: int
-BACKGROUND_BLUE: int
-BACKGROUND_GREEN: int
-BACKGROUND_RED: int
-BACKGROUND_WHITE: int
-BACKGROUND_INTENSITY: int
-SHORT = ctypes.c_short
-
-class COORD(ctypes.Structure): ...
-class SMALL_RECT(ctypes.Structure): ...
-class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): ...
-
-def GetStdHandle(kind: Any): ...
-
-SetConsoleTextAttribute: Any
-
-def GetConsoleInfo(handle: Any): ...
-def write_out(fil: Any, msg: Any) -> None: ...
diff --git a/typeshed/3/py/_log/__init__.pyi b/typeshed/3/py/_log/__init__.pyi
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/typeshed/3/py/_log/log.pyi b/typeshed/3/py/_log/log.pyi
deleted file mode 100644
index f4c4b80635..0000000000
--- a/typeshed/3/py/_log/log.pyi
+++ /dev/null
@@ -1,47 +0,0 @@
-from typing import Any, Optional
-
-class Message:
- keywords: Any = ...
- args: Any = ...
- def __init__(self, keywords: Any, args: Any) -> None: ...
- def content(self): ...
- def prefix(self): ...
-
-class Producer:
- Message: Any = ...
- keywords2consumer: Any = ...
- def __init__(self, keywords: Any, keywordmapper: Optional[Any] = ..., **kw: Any) -> None: ...
- def __getattr__(self, name: Any): ...
- def __call__(self, *args: Any) -> None: ...
-
-class KeywordMapper:
- keywords2consumer: Any = ...
- def __init__(self) -> None: ...
- def getstate(self): ...
- def setstate(self, state: Any) -> None: ...
- def getconsumer(self, keywords: Any): ...
- def setconsumer(self, keywords: Any, consumer: Any) -> None: ...
-
-def default_consumer(msg: Any) -> None: ...
-
-default_keywordmapper: Any
-
-def setconsumer(keywords: Any, consumer: Any) -> None: ...
-def setstate(state: Any) -> None: ...
-def getstate(): ...
-
-class File:
- def __init__(self, f: Any) -> None: ...
- def __call__(self, msg: Any) -> None: ...
-
-class Path:
- def __init__(self, filename: Any, append: bool = ..., delayed_create: bool = ..., buffering: bool = ...) -> None: ...
- def __call__(self, msg: Any) -> None: ...
-
-def STDOUT(msg: Any) -> None: ...
-def STDERR(msg: Any) -> None: ...
-
-class Syslog:
- priority: Any = ...
- def __init__(self, priority: Optional[Any] = ...) -> None: ...
- def __call__(self, msg: Any) -> None: ...
diff --git a/typeshed/3/py/_log/warning.pyi b/typeshed/3/py/_log/warning.pyi
deleted file mode 100644
index cd707a61de..0000000000
--- a/typeshed/3/py/_log/warning.pyi
+++ /dev/null
@@ -1,9 +0,0 @@
-from typing import Any, Optional
-
-class DeprecationWarning(DeprecationWarning):
- msg: Any = ...
- path: Any = ...
- lineno: Any = ...
- def __init__(self, msg: Any, path: Any, lineno: Any) -> None: ...
-
-def warn(msg: Any, stacklevel: int = ..., function: Optional[Any] = ...) -> None: ...
diff --git a/typeshed/3/py/_path/__init__.pyi b/typeshed/3/py/_path/__init__.pyi
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/typeshed/3/py/_path/cacheutil.pyi b/typeshed/3/py/_path/cacheutil.pyi
deleted file mode 100644
index 996bac798a..0000000000
--- a/typeshed/3/py/_path/cacheutil.pyi
+++ /dev/null
@@ -1,27 +0,0 @@
-from typing import Any
-
-class BasicCache:
- maxentries: Any = ...
- prunenum: Any = ...
- def __init__(self, maxentries: int = ...) -> None: ...
- def clear(self) -> None: ...
- def delentry(self, key: Any, raising: bool = ...) -> None: ...
- def getorbuild(self, key: Any, builder: Any): ...
-
-class BuildcostAccessCache(BasicCache): ...
-
-class WeightedCountingEntry:
- weight: Any = ...
- def __init__(self, value: Any, oneweight: Any) -> None: ...
- def value(self): ...
- value: Any = ...
-
-class AgingCache(BasicCache):
- maxseconds: Any = ...
- def __init__(self, maxentries: int = ..., maxseconds: float = ...) -> None: ...
-
-class AgingEntry:
- value: Any = ...
- weight: Any = ...
- def __init__(self, value: Any, expirationtime: Any) -> None: ...
- def isexpired(self): ...
diff --git a/typeshed/3/py/_path/common.pyi b/typeshed/3/py/_path/common.pyi
deleted file mode 100644
index d94a22e5a8..0000000000
--- a/typeshed/3/py/_path/common.pyi
+++ /dev/null
@@ -1,66 +0,0 @@
-from typing import Any, Optional, Generator
-
-iswin32: Any
-import_errors: Any
-
-class Checkers:
- path: Any = ...
- def __init__(self, path: Any) -> None: ...
- def dir(self) -> None: ...
- def file(self) -> None: ...
- def dotfile(self): ...
- def ext(self, arg: Any): ...
- def exists(self) -> None: ...
- def basename(self, arg: Any): ...
- def basestarts(self, arg: Any): ...
- def relto(self, arg: Any): ...
- def fnmatch(self, arg: Any): ...
- def endswith(self, arg: Any): ...
-
-class NeverRaised(Exception): ...
-
-class PathBase:
- Checkers: Any = ...
- def __div__(self, other: Any): ...
- __truediv__: Any = ...
- def basename(self): ...
- def dirname(self): ...
- def purebasename(self): ...
- def ext(self): ...
- def dirpath(self, *args: Any, **kwargs: Any): ...
- def read_binary(self): ...
- def read_text(self, encoding: Any): ...
- def read(self, mode: str = ...): ...
- def readlines(self, cr: int = ...): ...
- def load(self): ...
- def move(self, target: Any) -> None: ...
- def check(self, **kw: Any): ...
- def fnmatch(self, pattern: Any): ...
- def relto(self, relpath: Any): ...
- def ensure_dir(self, *args: Any): ...
- def bestrelpath(self, dest: Any): ...
- def exists(self): ...
- def isdir(self): ...
- def isfile(self): ...
- def parts(self, reverse: bool = ...): ...
- def common(self, other: Any): ...
- def __add__(self, other: Any): ...
- def __cmp__(self, other: Any): ...
- def __lt__(self, other: Any) -> Any: ...
- def visit(self, fil: Optional[Any] = ..., rec: Optional[Any] = ..., ignore: Any = ..., bf: bool = ..., sort: bool = ...) -> Generator[PathBase, None, None]: ...
- def samefile(self, other: Any): ...
- def __fspath__(self): ...
-
-class Visitor:
- rec: Any = ...
- fil: Any = ...
- ignore: Any = ...
- breadthfirst: Any = ...
- optsort: Any = ...
- def __init__(self, fil: Any, rec: Any, ignore: Any, bf: Any, sort: Any): ...
- def gen(self, path: Any) -> None: ...
-
-class FNMatcher:
- pattern: Any = ...
- def __init__(self, pattern: Any) -> None: ...
- def __call__(self, path: Any): ...
diff --git a/typeshed/3/py/_path/local.pyi b/typeshed/3/py/_path/local.pyi
deleted file mode 100644
index 2ce17166f2..0000000000
--- a/typeshed/3/py/_path/local.pyi
+++ /dev/null
@@ -1,89 +0,0 @@
-from py._path import common
-from py._path.common import iswin32
-from typing import Any, Optional
-
-
-def map_as_list(func: Any, iter: Any): ...
-
-ALLOW_IMPORTLIB_MODE: Any
-
-class Stat:
- def __getattr__(self, name: Any): ...
- path: Any = ...
- def __init__(self, path: Any, osstatresult: Any) -> None: ...
- @property
- def owner(self): ...
- @property
- def group(self): ...
- def isdir(self): ...
- def isfile(self): ...
- def islink(self): ...
-
-class PosixPath(common.PathBase):
- def chown(self, user: Any, group: Any, rec: int = ...): ...
- def readlink(self): ...
- def mklinkto(self, oldname: Any) -> None: ...
- def mksymlinkto(self, value: Any, absolute: int = ...) -> None: ...
-
-FSBase = PosixPath  # or common.PathBase
-
-def getuserid(user: Any): ...
-def getgroupid(group: Any): ...
-
-class LocalPath(FSBase):
- class ImportMismatchError(ImportError): ...
- sep: Any = ...
- class Checkers(common.Checkers):
- def dir(self): ...
- def file(self): ...
- def exists(self): ...
- def link(self): ...
- strpath: Any = ...
- def __init__(self, path: Optional[Any] = ..., expanduser: bool = ...) -> None: ...
- def __hash__(self) -> Any: ...
- def __eq__(self, other: Any) -> Any: ...
- def __ne__(self, other: Any) -> Any: ...
- def __lt__(self, other: Any) -> Any: ...
- def __gt__(self, other: Any) -> Any: ...
- def samefile(self, other: Any): ...
- def remove(self, rec: int = ..., ignore_errors: bool = ...) -> None: ...
- def computehash(self, hashtype: str = ..., chunksize: int = ...): ...
- def new(self, **kw: Any): ...
- def dirpath(self, *args: Any, **kwargs: Any): ...
- def join(self, *args: Any, **kwargs: Any): ...
- def open(self, mode: str = ..., ensure: bool = ..., encoding: Optional[Any] = ...): ...
- def islink(self): ...
- def check(self, **kw: Any): ...
- def listdir(self, fil: Optional[Any] = ..., sort: Optional[Any] = ...): ...
- def size(self): ...
- def mtime(self): ...
- def copy(self, target: Any, mode: bool = ..., stat: bool = ...): ...
- def rename(self, target: Any): ...
- def dump(self, obj: Any, bin: int = ...) -> None: ...
- def mkdir(self, *args: Any): ...
- def write_binary(self, data: Any, ensure: bool = ...) -> None: ...
- def write_text(self, data: Any, encoding: Any, ensure: bool = ...) -> None: ...
- def write(self, data: Any, mode: str = ..., ensure: bool = ...) -> None: ...
- def ensure(self, *args: Any, **kwargs: Any): ...
- def stat(self, raising: bool = ...): ...
- def lstat(self): ...
- def setmtime(self, mtime: Optional[Any] = ...): ...
- def chdir(self): ...
- def as_cwd(self) -> None: ...
- def realpath(self): ...
- def atime(self): ...
- def chmod(self, mode: Any, rec: int = ...) -> None: ...
- def pypkgpath(self): ...
- def pyimport(self, modname: Optional[Any] = ..., ensuresyspath: bool = ...): ...
- def sysexec(self, *argv: Any, **popen_opts: Any): ...
- def sysfind(cls, name: Any, checker: Optional[Any] = ..., paths: Optional[Any] = ...): ...
- @classmethod
- def get_temproot(cls): ...
- @classmethod
- def mkdtemp(cls, rootdir: Optional[Any] = ...): ...
- def make_numbered_dir(cls, prefix: str = ..., rootdir: Optional[Any] = ..., keep: int = ..., lock_timeout: int = ...): ...
-
-def copymode(src: Any, dest: Any) -> None: ...
-def copystat(src: Any, dest: Any) -> None: ...
-def copychunked(src: Any, dest: Any) -> None: ...
-def isimportable(name: Any): ...
diff --git a/typeshed/3/py/_path/svnurl.pyi b/typeshed/3/py/_path/svnurl.pyi
deleted file mode 100644
index 0fb1565fc4..0000000000
--- a/typeshed/3/py/_path/svnurl.pyi
+++ /dev/null
@@ -1,43 +0,0 @@
-from py import path as path, process as process
-from py._path import common as common, svnwc as svncommon
-from py._path.cacheutil import AgingCache as AgingCache, BuildcostAccessCache as BuildcostAccessCache
-from typing import Any, Optional
-
-DEBUG: bool
-
-class SvnCommandPath(svncommon.SvnPathBase):
- strpath: Any = ...
- rev: Any = ...
- auth: Any = ...
- def __new__(cls, path: Any, rev: Optional[Any] = ..., auth: Optional[Any] = ...): ...
- def open(self, mode: str = ...): ...
- def dirpath(self, *args: Any, **kwargs: Any): ...
- def mkdir(self, *args: Any, **kwargs: Any): ...
- def copy(self, target: Any, msg: str = ...) -> None: ...
- def rename(self, target: Any, msg: str = ...) -> None: ...
- def remove(self, rec: int = ..., msg: str = ...) -> None: ...
- def export(self, topath: Any): ...
- def ensure(self, *args: Any, **kwargs: Any): ...
- def info(self): ...
- def listdir(self, fil: Optional[Any] = ..., sort: Optional[Any] = ...): ...
- def log(self, rev_start: Optional[Any] = ..., rev_end: int = ..., verbose: bool = ...): ...
-
-class InfoSvnCommand:
- lspattern: Any = ...
- kind: str = ...
- created_rev: Any = ...
- last_author: Any = ...
- size: Any = ...
- mtime: Any = ...
- time: Any = ...
- def __init__(self, line: Any) -> None: ...
- def __eq__(self, other: Any) -> Any: ...
-
-def parse_time_with_missing_year(timestr: Any): ...
-
-class PathEntry:
- strpath: Any = ...
- action: Any = ...
- copyfrom_path: Any = ...
- copyfrom_rev: Any = ...
- def __init__(self, ppart: Any) -> None: ...
diff --git a/typeshed/3/py/_path/svnwc.pyi b/typeshed/3/py/_path/svnwc.pyi
deleted file mode 100644
index 73f89e412d..0000000000
--- a/typeshed/3/py/_path/svnwc.pyi
+++ /dev/null
@@ -1,152 +0,0 @@
-import py
-from py._path import common as common
-from typing import Any, Optional
-
-class cache:
- proplist: Any = ...
- info: Any = ...
- entries: Any = ...
- prop: Any = ...
-
-class RepoEntry:
- url: Any = ...
- rev: Any = ...
- timestamp: Any = ...
- def __init__(self, url: Any, rev: Any, timestamp: Any) -> None: ...
-
-class RepoCache:
- timeout: int = ...
- repos: Any = ...
- def __init__(self) -> None: ...
- def clear(self) -> None: ...
- def put(self, url: Any, rev: Any, timestamp: Optional[Any] = ...) -> None: ...
- def get(self, url: Any): ...
-
-repositories: Any
-ALLOWED_CHARS: str
-ALLOWED_CHARS_HOST: Any
-
-def checkbadchars(url: Any) -> None: ...
-
-class SvnPathBase(common.PathBase):
- sep: str = ...
- url: Any = ...
- def __hash__(self) -> Any: ...
- def new(self, **kw: Any): ...
- def __eq__(self, other: Any) -> Any: ...
- def __ne__(self, other: Any) -> Any: ...
- def join(self, *args: Any): ...
- def propget(self, name: Any): ...
- def proplist(self): ...
- def size(self): ...
- def mtime(self): ...
- class Checkers(common.Checkers):
- def dir(self): ...
- def file(self): ...
- def exists(self): ...
-
-def parse_apr_time(timestr: Any): ...
-
-class PropListDict(dict):
- path: Any = ...
- def __init__(self, path: Any, keynames: Any) -> None: ...
- def __getitem__(self, key: Any): ...
-
-def fixlocale(): ...
-
-ILLEGAL_CHARS: Any
-ISWINDOWS: Any
-
-def path_to_fspath(path: Any, addat: bool = ...): ...
-def url_from_path(path: Any): ...
-
-class SvnAuth:
- username: Any = ...
- password: Any = ...
- cache_auth: Any = ...
- interactive: Any = ...
- def __init__(self, username: Any, password: Any, cache_auth: bool = ..., interactive: bool = ...) -> None: ...
- def makecmdoptions(self): ...
-
-rex_blame: Any
-
-class SvnWCCommandPath(common.PathBase):
- sep: Any = ...
- localpath: Any = ...
- auth: Any = ...
- def __new__(cls, wcpath: Optional[Any] = ..., auth: Optional[Any] = ...): ...
- strpath: Any = ...
- rev: Any = ...
- def __eq__(self, other: Any) -> Any: ...
- url: Any = ...
- def dump(self, obj: Any): ...
- def svnurl(self): ...
- def switch(self, url: Any) -> None: ...
- def checkout(self, url: Optional[Any] = ..., rev: Optional[Any] = ...) -> None: ...
- def update(self, rev: str = ..., interactive: bool = ...) -> None: ...
- def write(self, content: Any, mode: str = ...) -> None: ...
- def ensure(self, *args: Any, **kwargs: Any): ...
- def mkdir(self, *args: Any): ...
- def add(self) -> None: ...
- def remove(self, rec: int = ..., force: int = ...) -> None: ...
- def copy(self, target: Any) -> None: ...
- def rename(self, target: Any) -> None: ...
- def lock(self) -> None: ...
- def unlock(self) -> None: ...
- def cleanup(self) -> None: ...
- def status(self, updates: int = ..., rec: int = ..., externals: int = ...): ...
- def diff(self, rev: Optional[Any] = ...): ...
- def blame(self): ...
- def commit(self, msg: str = ..., rec: int = ...): ...
- def propset(self, name: Any, value: Any, *args: Any) -> None: ...
- def propget(self, name: Any): ...
- def propdel(self, name: Any): ...
- def proplist(self, rec: int = ...): ...
- def revert(self, rec: int = ...): ...
- def new(self, **kw: Any): ...
- def join(self, *args: Any, **kwargs: Any): ...
- def info(self, usecache: int = ...): ...
- def listdir(self, fil: Optional[Any] = ..., sort: Optional[Any] = ...): ...
- def open(self, mode: str = ...): ...
- def log(self, rev_start: Optional[Any] = ..., rev_end: int = ..., verbose: bool = ...): ...
- def size(self): ...
- def mtime(self): ...
- def __hash__(self) -> Any: ...
-
-class WCStatus:
- attrnames: Any = ...
- wcpath: Any = ...
- rev: Any = ...
- modrev: Any = ...
- author: Any = ...
- def __init__(self, wcpath: Any, rev: Optional[Any] = ..., modrev: Optional[Any] = ..., author: Optional[Any] = ...) -> None: ...
- def allpath(self, sort: bool = ..., **kw: Any): ...
- def fromstring(data: Any, rootwcpath: Any, rev: Optional[Any] = ..., modrev: Optional[Any] = ..., author: Optional[Any] = ...): ...
-
-class XMLWCStatus(WCStatus):
- def fromstring(data: Any, rootwcpath: Any, rev: Optional[Any] = ..., modrev: Optional[Any] = ..., author: Optional[Any] = ...): ...
-
-class InfoSvnWCCommand:
- url: Any = ...
- kind: Any = ...
- rev: Any = ...
- path: Any = ...
- size: Any = ...
- created_rev: Any = ...
- last_author: Any = ...
- mtime: Any = ...
- time: Any = ...
- def __init__(self, output: Any) -> None: ...
- def __eq__(self, other: Any) -> Any: ...
-
-def parse_wcinfotime(timestr: Any): ...
-def make_recursive_propdict(wcroot: Any, output: Any, rex: Any = ...): ...
-def importxml(cache: Any = ...): ...
-
-class LogEntry:
- rev: Any = ...
- author: Any = ...
- msg: Any = ...
- date: Any = ...
- strpaths: Any = ...
- def __init__(self, logentry: Any) -> None: ...
diff --git a/typeshed/3/py/_process/__init__.pyi b/typeshed/3/py/_process/__init__.pyi
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/typeshed/3/py/_process/cmdexec.pyi b/typeshed/3/py/_process/cmdexec.pyi
deleted file mode 100644
index f3f330f013..0000000000
--- a/typeshed/3/py/_process/cmdexec.pyi
+++ /dev/null
@@ -1,13 +0,0 @@
-import py
-from subprocess import PIPE as PIPE, Popen as Popen
-from typing import Any
-
-def cmdexec(cmd: Any): ...
-
-class ExecutionFailed(py.error.Error):
- status: Any = ...
- systemstatus: Any = ...
- cmd: Any = ...
- err: Any = ...
- out: Any = ...
- def __init__(self, status: Any, systemstatus: Any, cmd: Any, out: Any, err: Any) -> None: ...
diff --git a/typeshed/3/py/_process/forkedfunc.pyi b/typeshed/3/py/_process/forkedfunc.pyi
deleted file mode 100644
index 716c3acb83..0000000000
--- a/typeshed/3/py/_process/forkedfunc.pyi
+++ /dev/null
@@ -1,25 +0,0 @@
-from typing import Any, Optional
-
-def get_unbuffered_io(fd: Any, filename: Any): ...
-
-class ForkedFunc:
- EXITSTATUS_EXCEPTION: int = ...
- fun: Any = ...
- args: Any = ...
- kwargs: Any = ...
- tempdir: Any = ...
- RETVAL: Any = ...
- STDOUT: Any = ...
- STDERR: Any = ...
- pid: Any = ...
- def __init__(self, fun: Any, args: Optional[Any] = ..., kwargs: Optional[Any] = ..., nice_level: int = ..., child_on_start: Optional[Any] = ..., child_on_exit: Optional[Any] = ...) -> None: ...
- def waitfinish(self, waiter: Any = ...): ...
- def __del__(self) -> None: ...
-
-class Result:
- exitstatus: Any = ...
- signal: Any = ...
- retval: Any = ...
- out: Any = ...
- err: Any = ...
- def __init__(self, exitstatus: Any, signal: Any, retval: Any, stdout: Any, stderr: Any) -> None: ...
diff --git a/typeshed/3/py/_process/killproc.pyi b/typeshed/3/py/_process/killproc.pyi
deleted file mode 100644
index ed2f9b3d0f..0000000000
--- a/typeshed/3/py/_process/killproc.pyi
+++ /dev/null
@@ -1,4 +0,0 @@
-from typing import Any
-
-def dokill(pid: Any) -> None: ...
-def kill(pid: Any) -> None: ...
diff --git a/typeshed/3/py/_std.pyi b/typeshed/3/py/_std.pyi
deleted file mode 100644
index 303052b061..0000000000
--- a/typeshed/3/py/_std.pyi
+++ /dev/null
@@ -1,10 +0,0 @@
-from typing import Any
-
-class PyStdIsDeprecatedWarning(DeprecationWarning): ...
-
-class Std:
- __dict__: Any = ...
- def __init__(self) -> None: ...
- def __getattr__(self, name: Any): ...
-
-std: Any
diff --git a/typeshed/3/py/_version.pyi b/typeshed/3/py/_version.pyi
deleted file mode 100644
index c2ee2cab48..0000000000
--- a/typeshed/3/py/_version.pyi
+++ /dev/null
@@ -1 +0,0 @@
-version: str
diff --git a/typeshed/3/py/_xmlgen.pyi b/typeshed/3/py/_xmlgen.pyi
deleted file mode 100644
index 85d1297b80..0000000000
--- a/typeshed/3/py/_xmlgen.pyi
+++ /dev/null
@@ -1,61 +0,0 @@
-from typing import Any, Optional
-
-def u(s: Any): ...
-def unicode(x: Any, errors: Optional[Any] = ...): ...
-
-class NamespaceMetaclass(type):
- def __getattr__(self, name: Any): ...
-
-class Tag(list):
- class Attr:
- def __init__(self, **kwargs: Any) -> None: ...
- attr: Any = ...
- def __init__(self, *args: Any, **kwargs: Any) -> None: ...
- def __unicode__(self): ...
- def unicode(self, indent: int = ...): ...
-
-Namespace: Any
-
-class HtmlTag(Tag):
- def unicode(self, indent: int = ...): ...
-
-class html(Namespace):
- __tagclass__: Any = ...
- __stickyname__: bool = ...
- __tagspec__: Any = ...
- class Style:
- def __init__(self, **kw: Any) -> None: ...
-
-class raw:
- uniobj: Any = ...
- def __init__(self, uniobj: Any) -> None: ...
-
-class SimpleUnicodeVisitor:
- write: Any = ...
- cache: Any = ...
- visited: Any = ...
- indent: Any = ...
- curindent: Any = ...
- parents: Any = ...
- shortempty: Any = ...
- def __init__(self, write: Any, indent: int = ..., curindent: int = ..., shortempty: bool = ...) -> None: ...
- def visit(self, node: Any) -> None: ...
- def raw(self, obj: Any) -> None: ...
- def list(self, obj: Any) -> None: ...
- def Tag(self, tag: Any) -> None: ...
- def attributes(self, tag: Any): ...
- def repr_attribute(self, attrs: Any, name: Any): ...
- def getstyle(self, tag: Any): ...
-
-class HtmlVisitor(SimpleUnicodeVisitor):
- single: Any = ...
- inline: Any = ...
- def repr_attribute(self, attrs: Any, name: Any): ...
-
-class _escape:
- escape: Any = ...
- charef_rex: Any = ...
- def __init__(self) -> None: ...
- def __call__(self, ustring: Any): ...
-
-escape: Any
diff --git a/typeshed/3/py/path.pyi b/typeshed/3/py/path.pyi
deleted file mode 100644
index 3070f07f47..0000000000
--- a/typeshed/3/py/path.pyi
+++ /dev/null
@@ -1,148 +0,0 @@
-from typing import Any
-
-import py._path.common
-import py._path.local
-import py._path.svnwc
-_ApiModule__doc: str
-
-class SvnAuth:
- __init__: Any = ...
- makecmdoptions: Any = ...
-
-class local(py._path.local.PosixPath):
- __init__: Any = ...
- Checkers: Any = ...
- ImportMismatchError: Any = ...
- _ensuredirs: Any = ...
- _ensuresyspath: Any = ...
- _fastjoin: Any = ...
- _getbyspec: Any = ...
- _patternchars: Any = ...
- as_cwd: Any = ...
- atime: Any = ...
- chdir: Any = ...
- check: Any = ...
- chmod: Any = ...
- computehash: Any = ...
- copy: Any = ...
- dirpath: Any = ...
- dump: Any = ...
- ensure: Any = ...
- islink: Any = ...
- join: Any = ...
- listdir: Any = ...
- lstat: Any = ...
- mkdir: Any = ...
- mtime: Any = ...
- new: Any = ...
- open: Any = ...
- pyimport: Any = ...
- pypkgpath: Any = ...
- realpath: Any = ...
- remove: Any = ...
- rename: Any = ...
- samefile: Any = ...
- sep: Any = ...
- setmtime: Any = ...
- size: Any = ...
- stat: Any = ...
- sysexec: Any = ...
- write: Any = ...
- write_binary: Any = ...
- write_text: Any = ...
- __eq__: Any = ...
- __gt__: Any = ...
- __hash__: Any = ...
- __lt__: Any = ...
- __ne__: Any = ...
- @classmethod
- def _gethomedir(self, *args, **kwargs) -> Any: ...
- @classmethod
- def get_temproot(self, *args, **kwargs) -> Any: ...
- @classmethod
- def make_numbered_dir(self, *args, **kwargs) -> Any: ...
- @classmethod
- def mkdtemp(self, *args, **kwargs) -> Any: ...
- @classmethod
- def sysfind(self, *args, **kwargs) -> Any: ...
-
-class svnurl(py._path.svnwc.SvnPathBase):
- _cmdexec: Any = ...
- _encodedurl: Any = ...
- _listdir_nameinfo: Any = ...
- _lsnorevcache: Any = ...
- _lsrevcache: Any = ...
- _norev_delentry: Any = ...
- _popen: Any = ...
- _propget: Any = ...
- _proplist: Any = ...
- _svncmdexecauth: Any = ...
- _svnpopenauth: Any = ...
- _svnwithrev: Any = ...
- _svnwrite: Any = ...
- copy: Any = ...
- dirpath: Any = ...
- ensure: Any = ...
- export: Any = ...
- info: Any = ...
- listdir: Any = ...
- log: Any = ...
- mkdir: Any = ...
- open: Any = ...
- remove: Any = ...
- rename: Any = ...
- def __init__(self, *args, **kwargs) -> None: ...
-
-class svnwc(py._path.common.PathBase):
- Checkers: Any = ...
- _authsvn: Any = ...
- _ensuredirs: Any = ...
- _escape: Any = ...
- _getbyspec: Any = ...
- _geturl: Any = ...
- _makeauthoptions: Any = ...
- _rex_commit: Any = ...
- _svn: Any = ...
- add: Any = ...
- blame: Any = ...
- checkout: Any = ...
- cleanup: Any = ...
- commit: Any = ...
- copy: Any = ...
- diff: Any = ...
- dirpath: Any = ...
- dump: Any = ...
- ensure: Any = ...
- info: Any = ...
- join: Any = ...
- listdir: Any = ...
- lock: Any = ...
- log: Any = ...
- mkdir: Any = ...
- mtime: Any = ...
- new: Any = ...
- open: Any = ...
- propdel: Any = ...
- propget: Any = ...
- proplist: Any = ...
- propset: Any = ...
- remove: Any = ...
- rename: Any = ...
- revert: Any = ...
- sep: Any = ...
- size: Any = ...
- status: Any = ...
- svnurl: Any = ...
- switch: Any = ...
- unlock: Any = ...
- update: Any = ...
- write: Any = ...
- __eq__: Any = ...
- __hash__: Any = ...
- def __init__(self, *args, **kwargs) -> None: ...
- @property
- def rev(self) -> Any: ...
- @property
- def strpath(self) -> Any: ...
- @property
- def url(self) -> Any: ...
diff --git a/typeshed/3/subprocess.pyi b/typeshed/3/subprocess.pyi
deleted file mode 100644
index 5494d927d0..0000000000
--- a/typeshed/3/subprocess.pyi
+++ /dev/null
@@ -1,1208 +0,0 @@
-# Stubs for subprocess
-
-# Based on http://docs.python.org/3.6/library/subprocess.html
-import sys
-from typing import Sequence, Any, Mapping, Callable, Tuple, IO, Optional, Union, Type, Text, Generic, TypeVar, AnyStr, overload
-from types import TracebackType
-
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from typing_extensions import Literal
-
-# We prefer to annotate inputs to methods (e.g. subprocess.check_call) with
-# these union types.
-# For outputs we use laborious Literal-based overloads to try to determine
-# which specific return type applies, and prefer to fall back to Any when
-# this does not work, so that the caller does not need an assertion to
-# narrow the type.
-#
-# For example:
-#
-# try:
-# x = subprocess.check_output(["ls", "-l"])
-# reveal_type(x) # bytes, based on the overloads
-# except TimeoutExpired as e:
-# reveal_type(e.cmd) # Any, but morally is _CMD
-_FILE = Union[None, int, IO[Any]]
-_TXT = Union[bytes, Text]
-if sys.version_info >= (3, 6):
- from builtins import _PathLike
- _PATH = Union[bytes, Text, _PathLike]
-else:
- _PATH = Union[bytes, Text]
-# Python 3.6 doesn't support _CMD being a single PathLike.
-# See: https://bugs.python.org/issue31961
-_CMD = Union[_TXT, Sequence[_PATH]]
-_ENV = Union[Mapping[bytes, _TXT], Mapping[Text, _TXT]]
-
-_S = TypeVar('_S')
-_T = TypeVar('_T')
-
-class CompletedProcess(Generic[_T]):
- # morally: _CMD
- args: Any
- returncode: Optional[int]
- # These are really both Optional, but requiring checks would be tedious
- # and writing all the overloads would be horrific.
- stdout: _T
- stderr: _T
- def __init__(self, args: _CMD, returncode: int, stdout: Optional[_T] = ..., stderr: Optional[_T] = ...) -> None: ...
- def check_returncode(self) -> None: ...
-
-if sys.version_info >= (3, 7):
- # Nearly the same args as for 3.6, except for capture_output and text
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- input: Optional[str] = ...,
- text: Literal[True],
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: str,
- errors: Optional[str] = ...,
- input: Optional[str] = ...,
- text: Optional[bool] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: Optional[str] = ...,
- errors: str,
- input: Optional[str] = ...,
- text: Optional[bool] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the *real* keyword only args start
- capture_output: bool = ...,
- check: bool = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- input: Optional[str] = ...,
- text: Optional[bool] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: None = ...,
- errors: None = ...,
- input: Optional[bytes] = ...,
- text: Literal[None, False] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[bytes]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- capture_output: bool = ...,
- check: bool = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- input: Optional[_TXT] = ...,
- text: Optional[bool] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[Any]: ...
-elif sys.version_info >= (3, 6):
- # Nearly same args as Popen.__init__ except for timeout, input, and check
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- encoding: str,
- errors: Optional[str] = ...,
- input: Optional[str] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- encoding: Optional[str] = ...,
- errors: str,
- input: Optional[str] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the *real* keyword only args start
- check: bool = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- input: Optional[str] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- encoding: None = ...,
- errors: None = ...,
- input: Optional[bytes] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[bytes]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- input: Optional[_TXT] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[Any]: ...
-else:
- # Nearly same args as Popen.__init__ except for timeout, input, and check
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the *real* keyword only args start
- check: bool = ...,
- input: Optional[str] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[str]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- input: Optional[bytes] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[bytes]: ...
- @overload
- def run(
- args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- check: bool = ...,
- input: Optional[_TXT] = ...,
- timeout: Optional[float] = ...,
- ) -> CompletedProcess[Any]: ...
-
-# Same args as Popen.__init__
-def call(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- timeout: Optional[float] = ...) -> int: ...
-
-# Same args as Popen.__init__
-def check_call(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stdout: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- timeout: Optional[float] = ...) -> int: ...
-
-if sys.version_info >= (3, 7):
- # 3.7 added text
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- text: Literal[True],
- ) -> str: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: str,
- errors: Optional[str] = ...,
- text: Optional[bool] = ...,
- ) -> str: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: Optional[str] = ...,
- errors: str,
- text: Optional[bool] = ...,
- ) -> str: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the real keyword only ones start
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- text: Optional[bool] = ...,
- ) -> str: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: None = ...,
- errors: None = ...,
- text: Literal[None, False] = ...,
- ) -> bytes: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- text: Optional[bool] = ...,
- ) -> Any: ... # morally: -> _TXT
-elif sys.version_info >= (3, 6):
- # 3.6 added encoding and errors
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: str,
- errors: Optional[str] = ...,
- ) -> str: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: Optional[str] = ...,
- errors: str,
- ) -> str: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- universal_newlines: Literal[True],
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- ) -> str: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: None = ...,
- errors: None = ...,
- ) -> bytes: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...,
- ) -> Any: ... # morally: -> _TXT
-else:
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- *,
- universal_newlines: Literal[True],
- ) -> str: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- ) -> bytes: ...
- @overload
- def check_output(args: _CMD,
- bufsize: int = ...,
- executable: _PATH = ...,
- stdin: _FILE = ...,
- stderr: _FILE = ...,
- preexec_fn: Callable[[], Any] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Any = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- timeout: Optional[float] = ...,
- input: _TXT = ...,
- ) -> Any: ... # morally: -> _TXT
-
-
-PIPE: int
-STDOUT: int
-DEVNULL: int
-class SubprocessError(Exception): ...
-class TimeoutExpired(SubprocessError):
- def __init__(self, cmd: _CMD, timeout: float, output: Optional[_TXT] = ..., stderr: Optional[_TXT] = ...) -> None: ...
- # morally: _CMD
- cmd: Any
- timeout: float
- # morally: Optional[_TXT]
- output: Any
- stdout: Any
- stderr: Any
-
-
-class CalledProcessError(Exception):
- returncode: Optional[int]
- # morally: _CMD
- cmd: Any
- # morally: Optional[_TXT]
- output: Any
-
- # morally: Optional[_TXT]
- stdout: Any
- stderr: Any
-
- def __init__(self,
- returncode: int,
- cmd: _CMD,
- output: Optional[_TXT] = ...,
- stderr: Optional[_TXT] = ...) -> None: ...
-
-class Popen(Generic[AnyStr]):
- args: _CMD
- stdin: IO[AnyStr]
- stdout: IO[AnyStr]
- stderr: IO[AnyStr]
- pid: int
- returncode: Optional[int]
-
-    # Technically it is wrong that Popen provides __new__ instead of __init__,
-    # but hopefully this shouldn't come up in practice.
-
- if sys.version_info >= (3, 7):
- # text is added in 3.7
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: Optional[bool] = ...,
- encoding: str,
- errors: Optional[str] = ...) -> Popen[str]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: Optional[bool] = ...,
- encoding: Optional[str] = ...,
- errors: str) -> Popen[str]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the *real* keyword only args start
- text: Optional[bool] = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...) -> Popen[str]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: Literal[True],
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...) -> Popen[str]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: Literal[None, False] = ...,
- encoding: None = ...,
- errors: None = ...) -> Popen[bytes]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- text: Optional[bool] = ...,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...) -> Popen[Any]: ...
- elif sys.version_info >= (3, 6):
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- encoding: str,
- errors: Optional[str] = ...) -> Popen[str]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- encoding: Optional[str] = ...,
- errors: str) -> Popen[str]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- # where the *real* keyword only args start
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...) -> Popen[str]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: Literal[False] = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- encoding: None = ...,
- errors: None = ...) -> Popen[bytes]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...,
- *,
- encoding: Optional[str] = ...,
- errors: Optional[str] = ...) -> Popen[Any]: ...
- else:
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- *,
- universal_newlines: Literal[True],
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...) -> Popen[str]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- *,
- universal_newlines: Literal[False] = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...) -> Popen[bytes]: ...
- @overload
- def __new__(cls,
- args: _CMD,
- bufsize: int = ...,
- executable: Optional[_PATH] = ...,
- stdin: Optional[_FILE] = ...,
- stdout: Optional[_FILE] = ...,
- stderr: Optional[_FILE] = ...,
- preexec_fn: Optional[Callable[[], Any]] = ...,
- close_fds: bool = ...,
- shell: bool = ...,
- cwd: Optional[_PATH] = ...,
- env: Optional[_ENV] = ...,
- universal_newlines: bool = ...,
- startupinfo: Optional[Any] = ...,
- creationflags: int = ...,
- restore_signals: bool = ...,
- start_new_session: bool = ...,
- pass_fds: Any = ...) -> Popen[Any]: ...
-
-    def poll(self) -> Optional[int]: ...  # None while the child is still running
- def wait(self, timeout: Optional[float] = ...) -> int: ...
- # Return str/bytes
- def communicate(self,
- input: Optional[AnyStr] = ...,
- timeout: Optional[float] = ...,
- # morally this should be optional
- ) -> Tuple[Optional[AnyStr], AnyStr]: ...
- def send_signal(self, signal: int) -> None: ...
- def terminate(self) -> None: ...
- def kill(self) -> None: ...
- def __enter__(self: _S) -> _S: ...
- def __exit__(self, type: Optional[Type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType]) -> None: ...
-
-# The result really is always a str.
-def getstatusoutput(cmd: _TXT) -> Tuple[int, str]: ...
-def getoutput(cmd: _TXT) -> str: ...
-
-def list2cmdline(seq: Sequence[str]) -> str: ... # undocumented
-
-if sys.platform == 'win32':
- class STARTUPINFO:
- if sys.version_info >= (3, 7):
- def __init__(self, *, dwFlags: int = ..., hStdInput: Optional[Any] = ..., hStdOutput: Optional[Any] = ..., hStdError: Optional[Any] = ..., wShowWindow: int = ..., lpAttributeList: Optional[Mapping[str, Any]] = ...) -> None: ...
- dwFlags: int
- hStdInput: Optional[Any]
- hStdOutput: Optional[Any]
- hStdError: Optional[Any]
- wShowWindow: int
- if sys.version_info >= (3, 7):
- lpAttributeList: Mapping[str, Any]
-
- STD_INPUT_HANDLE: Any
- STD_OUTPUT_HANDLE: Any
- STD_ERROR_HANDLE: Any
- SW_HIDE: int
- STARTF_USESTDHANDLES: int
- STARTF_USESHOWWINDOW: int
- CREATE_NEW_CONSOLE: int
- CREATE_NEW_PROCESS_GROUP: int
- if sys.version_info >= (3, 7):
- ABOVE_NORMAL_PRIORITY_CLASS: int
- BELOW_NORMAL_PRIORITY_CLASS: int
- HIGH_PRIORITY_CLASS: int
- IDLE_PRIORITY_CLASS: int
- NORMAL_PRIORITY_CLASS: int
- REALTIME_PRIORITY_CLASS: int
- CREATE_NO_WINDOW: int
- DETACHED_PROCESS: int
- CREATE_DEFAULT_ERROR_MODE: int
- CREATE_BREAKAWAY_FROM_JOB: int
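
The deleted subprocess stub relies on Literal-typed keyword arguments (text, universal_newlines, encoding, errors) so that a type checker can pick between str and bytes results without caller-side assertions, as its header comment explains. A minimal sketch of the effect under mypy (reveal_type is a checker-only construct; it is not defined at runtime before Python 3.11):

import subprocess

# text=True selects the Literal[True] overload, so the result is
# CompletedProcess[str] and stdout is typed as str.
proc_text = subprocess.run(["ls", "-l"], capture_output=True, text=True)
reveal_type(proc_text.stdout)  # Revealed type is "str"

# With no text/encoding/errors, the bytes overload matches and the
# result is CompletedProcess[bytes].
proc_bytes = subprocess.run(["ls", "-l"], capture_output=True)
reveal_type(proc_bytes.stdout)  # Revealed type is "bytes"
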
diff --git a/typeshed/3/typing.pyi b/typeshed/3/typing.pyi
deleted file mode 100644
index 6fb66ad5f9..0000000000
--- a/typeshed/3/typing.pyi
+++ /dev/null
@@ -1,649 +0,0 @@
-# Stubs for typing
-
-import sys
-from abc import abstractmethod, ABCMeta
-from types import CodeType, FrameType, TracebackType
-import collections # Needed by aliases like DefaultDict, see mypy issue 2986
-
-# Definitions of special type-checking constructs. Their definitions are
-# not used by type checkers, so their values do not matter.
-
-overload = object()
-Any = object()
-TypeVar = object()
-_promote = object()
-
-class _SpecialForm:
- def __getitem__(self, typeargs: Any) -> Any: ...
-
-Tuple: _SpecialForm = ...
-Generic: _SpecialForm = ...
-Protocol: _SpecialForm = ...
-Callable: _SpecialForm = ...
-Type: _SpecialForm = ...
-ClassVar: _SpecialForm = ...
-if sys.version_info >= (3, 8):
- Final: _SpecialForm = ...
- _F = TypeVar('_F', bound=Callable[..., Any])
- def final(f: _F) -> _F: ...
- Literal: _SpecialForm = ...
- # TypedDict is a (non-subscriptable) special form.
- TypedDict: object
-
-class GenericMeta(type): ...
-
-# Return type that indicates a function does not return.
-# This type is equivalent to the None type, but the no-op Union is necessary to
-# distinguish the None type from the None value.
-NoReturn = Union[None]
-
-# These type variables are used by the container types.
-_T = TypeVar('_T')
-_S = TypeVar('_S')
-_KT = TypeVar('_KT') # Key type.
-_VT = TypeVar('_VT') # Value type.
-_T_co = TypeVar('_T_co', covariant=True) # Any type covariant containers.
-_V_co = TypeVar('_V_co', covariant=True) # Any type covariant containers.
-_KT_co = TypeVar('_KT_co', covariant=True) # Key type covariant containers.
-_VT_co = TypeVar('_VT_co', covariant=True) # Value type covariant containers.
-_T_contra = TypeVar('_T_contra', contravariant=True) # Ditto contravariant.
-_TC = TypeVar('_TC', bound=Type[object])
-_C = TypeVar("_C", bound=Callable[..., Any])
-
-no_type_check = object()
-def no_type_check_decorator(decorator: _C) -> _C: ...
-
-# Type aliases and type constructors
-
-class TypeAlias:
- # Class for defining generic aliases for library types.
- def __init__(self, target_type: type) -> None: ...
- def __getitem__(self, typeargs: Any) -> Any: ...
-
-Union = TypeAlias(object)
-Optional = TypeAlias(object)
-List = TypeAlias(object)
-Dict = TypeAlias(object)
-DefaultDict = TypeAlias(object)
-Set = TypeAlias(object)
-FrozenSet = TypeAlias(object)
-Counter = TypeAlias(object)
-Deque = TypeAlias(object)
-ChainMap = TypeAlias(object)
-
-if sys.version_info >= (3, 7):
- OrderedDict = TypeAlias(object)
-
-# Predefined type variables.
-AnyStr = TypeVar('AnyStr', str, bytes)
-
-# Abstract base classes.
-
-def runtime_checkable(cls: _TC) -> _TC: ...
-
-@runtime_checkable
-class SupportsInt(Protocol, metaclass=ABCMeta):
- @abstractmethod
- def __int__(self) -> int: ...
-
-@runtime_checkable
-class SupportsFloat(Protocol, metaclass=ABCMeta):
- @abstractmethod
- def __float__(self) -> float: ...
-
-@runtime_checkable
-class SupportsComplex(Protocol, metaclass=ABCMeta):
- @abstractmethod
- def __complex__(self) -> complex: ...
-
-@runtime_checkable
-class SupportsBytes(Protocol, metaclass=ABCMeta):
- @abstractmethod
- def __bytes__(self) -> bytes: ...
-
-if sys.version_info >= (3, 8):
- @runtime_checkable
- class SupportsIndex(Protocol, metaclass=ABCMeta):
- @abstractmethod
- def __index__(self) -> int: ...
-
-@runtime_checkable
-class SupportsAbs(Protocol[_T_co]):
- @abstractmethod
- def __abs__(self) -> _T_co: ...
-
-@runtime_checkable
-class SupportsRound(Protocol[_T_co]):
- @overload
- @abstractmethod
- def __round__(self) -> int: ...
- @overload
- @abstractmethod
- def __round__(self, ndigits: int) -> _T_co: ...
-
-@runtime_checkable
-class Reversible(Protocol[_T_co]):
- @abstractmethod
- def __reversed__(self) -> Iterator[_T_co]: ...
-
-@runtime_checkable
-class Sized(Protocol, metaclass=ABCMeta):
- @abstractmethod
- def __len__(self) -> int: ...
-
-@runtime_checkable
-class Hashable(Protocol, metaclass=ABCMeta):
- # TODO: This is special, in that a subclass of a hashable class may not be hashable
- # (for example, list vs. object). It's not obvious how to represent this. This class
- # is currently mostly useless for static checking.
- @abstractmethod
- def __hash__(self) -> int: ...
-
-@runtime_checkable
-class Iterable(Protocol[_T_co]):
- @abstractmethod
- def __iter__(self) -> Iterator[_T_co]: ...
-
-@runtime_checkable
-class Iterator(Iterable[_T_co], Protocol[_T_co]):
- @abstractmethod
- def __next__(self) -> _T_co: ...
- def __iter__(self) -> Iterator[_T_co]: ...
-
-class Generator(Iterator[_T_co], Generic[_T_co, _T_contra, _V_co]):
- @abstractmethod
- def __next__(self) -> _T_co: ...
-
- @abstractmethod
- def send(self, value: _T_contra) -> _T_co: ...
-
- @abstractmethod
- def throw(self, typ: Type[BaseException], val: Optional[BaseException] = ...,
- tb: Optional[TracebackType] = ...) -> _T_co: ...
-
- @abstractmethod
- def close(self) -> None: ...
-
- @abstractmethod
- def __iter__(self) -> Generator[_T_co, _T_contra, _V_co]: ...
-
- @property
- def gi_code(self) -> CodeType: ...
- @property
- def gi_frame(self) -> FrameType: ...
- @property
- def gi_running(self) -> bool: ...
- @property
- def gi_yieldfrom(self) -> Optional[Generator[Any, Any, Any]]: ...
-
-@runtime_checkable
-class Awaitable(Protocol[_T_co]):
- @abstractmethod
- def __await__(self) -> Generator[Any, None, _T_co]: ...
-
-class Coroutine(Awaitable[_V_co], Generic[_T_co, _T_contra, _V_co]):
- @property
- def cr_await(self) -> Optional[Any]: ...
- @property
- def cr_code(self) -> CodeType: ...
- @property
- def cr_frame(self) -> FrameType: ...
- @property
- def cr_running(self) -> bool: ...
-
- @abstractmethod
- def send(self, value: _T_contra) -> _T_co: ...
-
- @abstractmethod
- def throw(self, typ: Type[BaseException], val: Optional[BaseException] = ...,
- tb: Optional[TracebackType] = ...) -> _T_co: ...
-
- @abstractmethod
- def close(self) -> None: ...
-
-
-# NOTE: This type does not exist in typing.py or PEP 484.
-# The parameters correspond to Generator, but the 4th is the original type.
-class AwaitableGenerator(Awaitable[_V_co], Generator[_T_co, _T_contra, _V_co],
- Generic[_T_co, _T_contra, _V_co, _S], metaclass=ABCMeta): ...
-
-@runtime_checkable
-class AsyncIterable(Protocol[_T_co]):
- @abstractmethod
- def __aiter__(self) -> AsyncIterator[_T_co]: ...
-
-@runtime_checkable
-class AsyncIterator(AsyncIterable[_T_co],
- Protocol[_T_co]):
- @abstractmethod
- def __anext__(self) -> Awaitable[_T_co]: ...
- def __aiter__(self) -> AsyncIterator[_T_co]: ...
-
-if sys.version_info >= (3, 6):
- class AsyncGenerator(AsyncIterator[_T_co], Generic[_T_co, _T_contra]):
- @abstractmethod
- def __anext__(self) -> Awaitable[_T_co]: ...
-
- @abstractmethod
- def asend(self, value: _T_contra) -> Awaitable[_T_co]: ...
-
- @abstractmethod
- def athrow(self, typ: Type[BaseException], val: Optional[BaseException] = ...,
- tb: Any = ...) -> Awaitable[_T_co]: ...
-
- @abstractmethod
- def aclose(self) -> Awaitable[None]: ...
-
- @abstractmethod
- def __aiter__(self) -> AsyncGenerator[_T_co, _T_contra]: ...
-
- @property
- def ag_await(self) -> Any: ...
- @property
- def ag_code(self) -> CodeType: ...
- @property
- def ag_frame(self) -> FrameType: ...
- @property
- def ag_running(self) -> bool: ...
-
-@runtime_checkable
-class Container(Protocol[_T_co]):
- @abstractmethod
- def __contains__(self, __x: object) -> bool: ...
-
-
-if sys.version_info >= (3, 6):
- @runtime_checkable
- class Collection(Iterable[_T_co], Container[_T_co], Protocol[_T_co]):
- # Implement Sized (but don't have it as a base class).
- @abstractmethod
- def __len__(self) -> int: ...
-
- _Collection = Collection
-else:
- @runtime_checkable
- class _Collection(Iterable[_T_co], Container[_T_co], Protocol[_T_co]):
- # Implement Sized (but don't have it as a base class).
- @abstractmethod
- def __len__(self) -> int: ...
-
-class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]):
- @overload
- @abstractmethod
- def __getitem__(self, i: int) -> _T_co: ...
- @overload
- @abstractmethod
- def __getitem__(self, s: slice) -> Sequence[_T_co]: ...
- # Mixin methods
- def index(self, x: Any, start: int = ..., end: int = ...) -> int: ...
- def count(self, x: Any) -> int: ...
- def __contains__(self, x: object) -> bool: ...
- def __iter__(self) -> Iterator[_T_co]: ...
- def __reversed__(self) -> Iterator[_T_co]: ...
-
-class MutableSequence(Sequence[_T], Generic[_T]):
- @abstractmethod
- def insert(self, index: int, object: _T) -> None: ...
- @overload
- @abstractmethod
- def __getitem__(self, i: int) -> _T: ...
- @overload
- @abstractmethod
- def __getitem__(self, s: slice) -> MutableSequence[_T]: ...
- @overload
- @abstractmethod
- def __setitem__(self, i: int, o: _T) -> None: ...
- @overload
- @abstractmethod
- def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ...
- @overload
- @abstractmethod
- def __delitem__(self, i: int) -> None: ...
- @overload
- @abstractmethod
- def __delitem__(self, i: slice) -> None: ...
- # Mixin methods
- def append(self, object: _T) -> None: ...
- def clear(self) -> None: ...
- def extend(self, iterable: Iterable[_T]) -> None: ...
- def reverse(self) -> None: ...
- def pop(self, index: int = ...) -> _T: ...
- def remove(self, object: _T) -> None: ...
- def __iadd__(self, x: Iterable[_T]) -> MutableSequence[_T]: ...
-
-class AbstractSet(_Collection[_T_co], Generic[_T_co]):
- @abstractmethod
- def __contains__(self, x: object) -> bool: ...
- # Mixin methods
- def __le__(self, s: AbstractSet[Any]) -> bool: ...
- def __lt__(self, s: AbstractSet[Any]) -> bool: ...
- def __gt__(self, s: AbstractSet[Any]) -> bool: ...
- def __ge__(self, s: AbstractSet[Any]) -> bool: ...
- def __and__(self, s: AbstractSet[Any]) -> AbstractSet[_T_co]: ...
- def __or__(self, s: AbstractSet[_T]) -> AbstractSet[Union[_T_co, _T]]: ...
- def __sub__(self, s: AbstractSet[Any]) -> AbstractSet[_T_co]: ...
- def __xor__(self, s: AbstractSet[_T]) -> AbstractSet[Union[_T_co, _T]]: ...
- def isdisjoint(self, s: Iterable[Any]) -> bool: ...
-
-class MutableSet(AbstractSet[_T], Generic[_T]):
- @abstractmethod
- def add(self, x: _T) -> None: ...
- @abstractmethod
- def discard(self, x: _T) -> None: ...
- # Mixin methods
- def clear(self) -> None: ...
- def pop(self) -> _T: ...
- def remove(self, element: _T) -> None: ...
- def __ior__(self, s: AbstractSet[_S]) -> MutableSet[Union[_T, _S]]: ...
- def __iand__(self, s: AbstractSet[Any]) -> MutableSet[_T]: ...
- def __ixor__(self, s: AbstractSet[_S]) -> MutableSet[Union[_T, _S]]: ...
- def __isub__(self, s: AbstractSet[Any]) -> MutableSet[_T]: ...
-
-class MappingView(Sized):
- def __init__(self, mapping: Mapping[_KT_co, _VT_co]) -> None: ... # undocumented
- def __len__(self) -> int: ...
-
-class ItemsView(MappingView, AbstractSet[Tuple[_KT_co, _VT_co]], Generic[_KT_co, _VT_co]):
- def __init__(self, mapping: Mapping[_KT_co, _VT_co]) -> None: ... # undocumented
- def __and__(self, o: Iterable[Any]) -> Set[Tuple[_KT_co, _VT_co]]: ...
- def __rand__(self, o: Iterable[_T]) -> Set[_T]: ...
- def __contains__(self, o: object) -> bool: ...
- def __iter__(self) -> Iterator[Tuple[_KT_co, _VT_co]]: ...
- if sys.version_info >= (3, 8):
- def __reversed__(self) -> Iterator[Tuple[_KT_co, _VT_co]]: ...
- def __or__(self, o: Iterable[_T]) -> Set[Union[Tuple[_KT_co, _VT_co], _T]]: ...
- def __ror__(self, o: Iterable[_T]) -> Set[Union[Tuple[_KT_co, _VT_co], _T]]: ...
- def __sub__(self, o: Iterable[Any]) -> Set[Tuple[_KT_co, _VT_co]]: ...
- def __rsub__(self, o: Iterable[_T]) -> Set[_T]: ...
- def __xor__(self, o: Iterable[_T]) -> Set[Union[Tuple[_KT_co, _VT_co], _T]]: ...
- def __rxor__(self, o: Iterable[_T]) -> Set[Union[Tuple[_KT_co, _VT_co], _T]]: ...
-
-class KeysView(MappingView, AbstractSet[_KT_co], Generic[_KT_co]):
- def __init__(self, mapping: Mapping[_KT_co, _VT_co]) -> None: ... # undocumented
- def __and__(self, o: Iterable[Any]) -> Set[_KT_co]: ...
- def __rand__(self, o: Iterable[_T]) -> Set[_T]: ...
- def __contains__(self, o: object) -> bool: ...
- def __iter__(self) -> Iterator[_KT_co]: ...
- if sys.version_info >= (3, 8):
- def __reversed__(self) -> Iterator[_KT_co]: ...
- def __or__(self, o: Iterable[_T]) -> Set[Union[_KT_co, _T]]: ...
- def __ror__(self, o: Iterable[_T]) -> Set[Union[_KT_co, _T]]: ...
- def __sub__(self, o: Iterable[Any]) -> Set[_KT_co]: ...
- def __rsub__(self, o: Iterable[_T]) -> Set[_T]: ...
- def __xor__(self, o: Iterable[_T]) -> Set[Union[_KT_co, _T]]: ...
- def __rxor__(self, o: Iterable[_T]) -> Set[Union[_KT_co, _T]]: ...
-
-class ValuesView(MappingView, Iterable[_VT_co], Generic[_VT_co]):
- def __init__(self, mapping: Mapping[_KT_co, _VT_co]) -> None: ... # undocumented
- def __contains__(self, o: object) -> bool: ...
- def __iter__(self) -> Iterator[_VT_co]: ...
- if sys.version_info >= (3, 8):
- def __reversed__(self) -> Iterator[_VT_co]: ...
-
-@runtime_checkable
-class ContextManager(Protocol[_T_co]):
- def __enter__(self) -> _T_co: ...
- def __exit__(self, __exc_type: Optional[Type[BaseException]],
- __exc_value: Optional[BaseException],
- __traceback: Optional[TracebackType]) -> Optional[bool]: ...
-
-@runtime_checkable
-class AsyncContextManager(Protocol[_T_co]):
- def __aenter__(self) -> Awaitable[_T_co]: ...
- def __aexit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_value: Optional[BaseException],
- traceback: Optional[TracebackType],
- ) -> Awaitable[Optional[bool]]: ...
-
-class Mapping(_Collection[_KT], Generic[_KT, _VT_co]):
- # TODO: We wish the key type could also be covariant, but that doesn't work,
-    # see discussion in https://github.com/python/typing/pull/273.
- @abstractmethod
- def __getitem__(self, k: _KT) -> _VT_co:
- ...
- # Mixin methods
- @overload
- def get(self, k: _KT) -> Optional[_VT_co]: ...
- @overload
- def get(self, k: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]: ...
- def items(self) -> AbstractSet[Tuple[_KT, _VT_co]]: ...
- def keys(self) -> AbstractSet[_KT]: ...
- def values(self) -> ValuesView[_VT_co]: ...
- def __contains__(self, o: object) -> bool: ...
-
-class MutableMapping(Mapping[_KT, _VT], Generic[_KT, _VT]):
- @abstractmethod
- def __setitem__(self, k: _KT, v: _VT) -> None: ...
- @abstractmethod
- def __delitem__(self, v: _KT) -> None: ...
-
- def clear(self) -> None: ...
- @overload
- def pop(self, k: _KT) -> _VT: ...
- @overload
- def pop(self, k: _KT, default: Union[_VT, _T] = ...) -> Union[_VT, _T]: ...
- def popitem(self) -> Tuple[_KT, _VT]: ...
- def setdefault(self, k: _KT, default: _VT = ...) -> _VT: ...
- # 'update' used to take a Union, but using overloading is better.
- # The second overloaded type here is a bit too general, because
- # Mapping[Tuple[_KT, _VT], W] is a subclass of Iterable[Tuple[_KT, _VT]],
- # but will always have the behavior of the first overloaded type
- # at runtime, leading to keys of a mix of types _KT and Tuple[_KT, _VT].
- # We don't currently have any way of forcing all Mappings to use
- # the first overload, but by using overloading rather than a Union,
- # mypy will commit to using the first overload when the argument is
- # known to be a Mapping with unknown type parameters, which is closer
- # to the behavior we want. See mypy issue #1430.
- @overload
- def update(self, __m: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
- @overload
- def update(self, __m: Iterable[Tuple[_KT, _VT]], **kwargs: _VT) -> None: ...
- @overload
- def update(self, **kwargs: _VT) -> None: ...
-
-Text = str
-
-TYPE_CHECKING = True
-
-class IO(Iterator[AnyStr], Generic[AnyStr]):
- # TODO detach
- # TODO use abstract properties
- @property
- def mode(self) -> str: ...
- @property
- def name(self) -> str: ...
- @abstractmethod
- def close(self) -> None: ...
- @property
- def closed(self) -> bool: ...
- @abstractmethod
- def fileno(self) -> int: ...
- @abstractmethod
- def flush(self) -> None: ...
- @abstractmethod
- def isatty(self) -> bool: ...
- # TODO what if n is None?
- @abstractmethod
- def read(self, n: int = ...) -> AnyStr: ...
- @abstractmethod
- def readable(self) -> bool: ...
- @abstractmethod
- def readline(self, limit: int = ...) -> AnyStr: ...
- @abstractmethod
- def readlines(self, hint: int = ...) -> list[AnyStr]: ...
- @abstractmethod
- def seek(self, offset: int, whence: int = ...) -> int: ...
- @abstractmethod
- def seekable(self) -> bool: ...
- @abstractmethod
- def tell(self) -> int: ...
- @abstractmethod
- def truncate(self, size: Optional[int] = ...) -> int: ...
- @abstractmethod
- def writable(self) -> bool: ...
- # TODO buffer objects
- @abstractmethod
- def write(self, s: AnyStr) -> int: ...
- @abstractmethod
- def writelines(self, lines: Iterable[AnyStr]) -> None: ...
-
- @abstractmethod
- def __next__(self) -> AnyStr: ...
- @abstractmethod
- def __iter__(self) -> Iterator[AnyStr]: ...
- @abstractmethod
- def __enter__(self) -> IO[AnyStr]: ...
- @abstractmethod
- def __exit__(self, t: Optional[Type[BaseException]], value: Optional[BaseException],
- traceback: Optional[TracebackType]) -> Optional[bool]: ...
-
-class BinaryIO(IO[bytes]):
- # TODO readinto
- # TODO read1?
- # TODO peek?
- @overload
- @abstractmethod
- def write(self, s: bytearray) -> int: ...
- @overload
- @abstractmethod
- def write(self, s: bytes) -> int: ...
-
- @abstractmethod
- def __enter__(self) -> BinaryIO: ...
-
-class TextIO(IO[str]):
- # TODO use abstractproperty
- @property
- def buffer(self) -> BinaryIO: ...
- @property
- def encoding(self) -> str: ...
- @property
- def errors(self) -> Optional[str]: ...
- @property
- def line_buffering(self) -> int: ... # int on PyPy, bool on CPython
- @property
- def newlines(self) -> Any: ... # None, str or tuple
- @abstractmethod
- def __enter__(self) -> TextIO: ...
-
-class ByteString(Sequence[int], metaclass=ABCMeta): ...
-
-class Match(Generic[AnyStr]):
- pos: int
- endpos: int
- lastindex: Optional[int]
- lastgroup: Optional[AnyStr]
- string: AnyStr
-
- # The regular expression object whose match() or search() method produced
- # this match instance.
- re: Pattern[AnyStr]
-
- def expand(self, template: AnyStr) -> AnyStr: ...
-
- # TODO: The return for a group may be None, except if __group is 0 or not given.
- @overload
- def group(self, __group: Union[str, int] = ...) -> AnyStr: ...
- @overload
- def group(
- self,
- __group1: Union[str, int],
- __group2: Union[str, int],
- *groups: Union[str, int],
- ) -> Tuple[AnyStr, ...]: ...
-
- def groups(self, default: AnyStr = ...) -> Sequence[AnyStr]: ...
- def groupdict(self, default: AnyStr = ...) -> dict[str, AnyStr]: ...
- def start(self, group: Union[int, str] = ...) -> int: ...
- def end(self, group: Union[int, str] = ...) -> int: ...
- def span(self, group: Union[int, str] = ...) -> Tuple[int, int]: ...
- @property
- def regs(self) -> Tuple[Tuple[int, int], ...]: ... # undocumented
- if sys.version_info >= (3, 6):
- def __getitem__(self, g: Union[int, str]) -> AnyStr: ...
-
-class Pattern(Generic[AnyStr]):
- flags: int
- groupindex: Mapping[str, int]
- groups: int
- pattern: AnyStr
-
- def search(self, string: AnyStr, pos: int = ...,
- endpos: int = ...) -> Optional[Match[AnyStr]]: ...
- def match(self, string: AnyStr, pos: int = ...,
- endpos: int = ...) -> Optional[Match[AnyStr]]: ...
- # New in Python 3.4
- def fullmatch(self, string: AnyStr, pos: int = ...,
- endpos: int = ...) -> Optional[Match[AnyStr]]: ...
- def split(self, string: AnyStr, maxsplit: int = ...) -> list[AnyStr]: ...
- def findall(self, string: AnyStr, pos: int = ...,
- endpos: int = ...) -> list[Any]: ...
- def finditer(self, string: AnyStr, pos: int = ...,
- endpos: int = ...) -> Iterator[Match[AnyStr]]: ...
-
- @overload
- def sub(self, repl: AnyStr, string: AnyStr,
- count: int = ...) -> AnyStr: ...
- @overload
- def sub(self, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr,
- count: int = ...) -> AnyStr: ...
-
- @overload
- def subn(self, repl: AnyStr, string: AnyStr,
- count: int = ...) -> Tuple[AnyStr, int]: ...
- @overload
- def subn(self, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr,
- count: int = ...) -> Tuple[AnyStr, int]: ...
-
-# Functions
-
-def get_type_hints(
- obj: Callable[..., Any], globalns: Optional[Dict[str, Any]] = ..., localns: Optional[Dict[str, Any]] = ...,
-) -> Dict[str, Any]: ...
-if sys.version_info >= (3, 8):
- def get_origin(tp: Any) -> Optional[Any]: ...
- def get_args(tp: Any) -> Tuple[Any, ...]: ...
-
-@overload
-def cast(tp: Type[_T], obj: Any) -> _T: ...
-@overload
-def cast(tp: str, obj: Any) -> Any: ...
-
-# Type constructors
-
-# NamedTuple is special-cased in the type checker
-class NamedTuple(Tuple[Any, ...]):
- _field_types: collections.OrderedDict[str, Type[Any]]
- _field_defaults: Dict[str, Any] = ...
- _fields: Tuple[str, ...]
- _source: str
-
- def __init__(self, typename: str, fields: Iterable[Tuple[str, Any]] = ...,
- **kwargs: Any) -> None: ...
-
- @classmethod
- def _make(cls: Type[_T], iterable: Iterable[Any]) -> _T: ...
-
- if sys.version_info >= (3, 8):
- def _asdict(self) -> Dict[str, Any]: ...
- else:
- def _asdict(self) -> collections.OrderedDict[str, Any]: ...
- def _replace(self: _T, **kwargs: Any) -> _T: ...
-
-# Internal mypy fallback type for all typed dicts (does not exist at runtime)
-class _TypedDict(Mapping[str, object], metaclass=ABCMeta):
- def copy(self: _T) -> _T: ...
- # Using NoReturn so that only calls using mypy plugin hook that specialize the signature
- # can go through.
- def setdefault(self, k: NoReturn, default: object) -> object: ...
- # Mypy plugin hook for 'pop' expects that 'default' has a type variable type.
- def pop(self, k: NoReturn, default: _T = ...) -> object: ...
- def update(self: _T, __m: _T) -> None: ...
- def __delitem__(self, k: NoReturn) -> None: ...
-
-def NewType(name: str, tp: Type[_T]) -> Type[_T]: ...
-
-# This itself is only available during type checking
-def type_check_only(func_or_cls: _C) -> _C: ...
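
The deleted typing stub marks classes such as Iterable and Sized as @runtime_checkable Protocols, which is what permits structural isinstance() checks against them. A small illustration using the standard typing module; SupportsClose is an invented protocol for this example, not part of the stub:

from typing import Iterable, Protocol, Sized, runtime_checkable

@runtime_checkable
class SupportsClose(Protocol):
    def close(self) -> None: ...

class Resource:
    def close(self) -> None:
        print("closed")

# isinstance() only checks that the named methods exist; Resource never
# inherits from SupportsClose, yet the structural check passes.
assert isinstance(Resource(), SupportsClose)
assert isinstance([1, 2, 3], Iterable)
assert isinstance("abc", Sized)
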
diff --git a/typeshed/3/arcp/__init__.pyi b/typeshed/arcp/__init__.pyi
similarity index 100%
rename from typeshed/3/arcp/__init__.pyi
rename to typeshed/arcp/__init__.pyi
diff --git a/typeshed/3/arcp/generate.pyi b/typeshed/arcp/generate.pyi
similarity index 100%
rename from typeshed/3/arcp/generate.pyi
rename to typeshed/arcp/generate.pyi
diff --git a/typeshed/3/arcp/parse.pyi b/typeshed/arcp/parse.pyi
similarity index 100%
rename from typeshed/3/arcp/parse.pyi
rename to typeshed/arcp/parse.pyi
diff --git a/typeshed/3/argcomplete/__init__.pyi b/typeshed/argcomplete/__init__.pyi
similarity index 100%
rename from typeshed/3/argcomplete/__init__.pyi
rename to typeshed/argcomplete/__init__.pyi
diff --git a/typeshed/3/bagit.pyi b/typeshed/bagit.pyi
similarity index 100%
rename from typeshed/3/bagit.pyi
rename to typeshed/bagit.pyi
diff --git a/typeshed/2and3/cachecontrol/__init__.pyi b/typeshed/cachecontrol/__init__.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/__init__.pyi
rename to typeshed/cachecontrol/__init__.pyi
diff --git a/typeshed/2and3/cachecontrol/adapter.pyi b/typeshed/cachecontrol/adapter.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/adapter.pyi
rename to typeshed/cachecontrol/adapter.pyi
diff --git a/typeshed/2and3/cachecontrol/cache.pyi b/typeshed/cachecontrol/cache.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/cache.pyi
rename to typeshed/cachecontrol/cache.pyi
diff --git a/typeshed/2and3/cachecontrol/caches/__init__.pyi b/typeshed/cachecontrol/caches/__init__.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/caches/__init__.pyi
rename to typeshed/cachecontrol/caches/__init__.pyi
diff --git a/typeshed/2and3/cachecontrol/caches/file_cache.pyi b/typeshed/cachecontrol/caches/file_cache.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/caches/file_cache.pyi
rename to typeshed/cachecontrol/caches/file_cache.pyi
diff --git a/typeshed/2and3/cachecontrol/compat.pyi b/typeshed/cachecontrol/compat.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/compat.pyi
rename to typeshed/cachecontrol/compat.pyi
diff --git a/typeshed/2and3/cachecontrol/controller.pyi b/typeshed/cachecontrol/controller.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/controller.pyi
rename to typeshed/cachecontrol/controller.pyi
diff --git a/typeshed/2and3/cachecontrol/filewrapper.pyi b/typeshed/cachecontrol/filewrapper.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/filewrapper.pyi
rename to typeshed/cachecontrol/filewrapper.pyi
diff --git a/typeshed/2and3/cachecontrol/serialize.pyi b/typeshed/cachecontrol/serialize.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/serialize.pyi
rename to typeshed/cachecontrol/serialize.pyi
diff --git a/typeshed/2and3/cachecontrol/wrapper.pyi b/typeshed/cachecontrol/wrapper.pyi
similarity index 100%
rename from typeshed/2and3/cachecontrol/wrapper.pyi
rename to typeshed/cachecontrol/wrapper.pyi
diff --git a/typeshed/2and3/coloredlogs/__init__.pyi b/typeshed/coloredlogs/__init__.pyi
similarity index 100%
rename from typeshed/2and3/coloredlogs/__init__.pyi
rename to typeshed/coloredlogs/__init__.pyi
diff --git a/typeshed/2and3/galaxy/__init__.pyi b/typeshed/galaxy/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/__init__.pyi
rename to typeshed/galaxy/__init__.pyi
diff --git a/typeshed/2and3/galaxy/exceptions/__init__.pyi b/typeshed/galaxy/exceptions/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/exceptions/__init__.pyi
rename to typeshed/galaxy/exceptions/__init__.pyi
diff --git a/typeshed/2and3/galaxy/exceptions/error_codes.pyi b/typeshed/galaxy/exceptions/error_codes.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/exceptions/error_codes.pyi
rename to typeshed/galaxy/exceptions/error_codes.pyi
diff --git a/typeshed/2and3/galaxy/jobs/__init__.pyi b/typeshed/galaxy/jobs/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/__init__.pyi
rename to typeshed/galaxy/jobs/__init__.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/__init__.pyi b/typeshed/galaxy/jobs/metrics/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/__init__.pyi
rename to typeshed/galaxy/jobs/metrics/__init__.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/collectl/__init__.pyi b/typeshed/galaxy/jobs/metrics/collectl/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/collectl/__init__.pyi
rename to typeshed/galaxy/jobs/metrics/collectl/__init__.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/collectl/cli.pyi b/typeshed/galaxy/jobs/metrics/collectl/cli.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/collectl/cli.pyi
rename to typeshed/galaxy/jobs/metrics/collectl/cli.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/collectl/processes.pyi b/typeshed/galaxy/jobs/metrics/collectl/processes.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/collectl/processes.pyi
rename to typeshed/galaxy/jobs/metrics/collectl/processes.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/collectl/stats.pyi b/typeshed/galaxy/jobs/metrics/collectl/stats.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/collectl/stats.pyi
rename to typeshed/galaxy/jobs/metrics/collectl/stats.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/collectl/subsystems.pyi b/typeshed/galaxy/jobs/metrics/collectl/subsystems.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/collectl/subsystems.pyi
rename to typeshed/galaxy/jobs/metrics/collectl/subsystems.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/formatting.pyi b/typeshed/galaxy/jobs/metrics/formatting.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/formatting.pyi
rename to typeshed/galaxy/jobs/metrics/formatting.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/instrumenters/__init__.pyi b/typeshed/galaxy/jobs/metrics/instrumenters/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/instrumenters/__init__.pyi
rename to typeshed/galaxy/jobs/metrics/instrumenters/__init__.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/instrumenters/collectl.pyi b/typeshed/galaxy/jobs/metrics/instrumenters/collectl.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/instrumenters/collectl.pyi
rename to typeshed/galaxy/jobs/metrics/instrumenters/collectl.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/instrumenters/core.pyi b/typeshed/galaxy/jobs/metrics/instrumenters/core.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/instrumenters/core.pyi
rename to typeshed/galaxy/jobs/metrics/instrumenters/core.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/instrumenters/cpuinfo.pyi b/typeshed/galaxy/jobs/metrics/instrumenters/cpuinfo.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/instrumenters/cpuinfo.pyi
rename to typeshed/galaxy/jobs/metrics/instrumenters/cpuinfo.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/instrumenters/env.pyi b/typeshed/galaxy/jobs/metrics/instrumenters/env.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/instrumenters/env.pyi
rename to typeshed/galaxy/jobs/metrics/instrumenters/env.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/instrumenters/meminfo.pyi b/typeshed/galaxy/jobs/metrics/instrumenters/meminfo.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/instrumenters/meminfo.pyi
rename to typeshed/galaxy/jobs/metrics/instrumenters/meminfo.pyi
diff --git a/typeshed/2and3/galaxy/jobs/metrics/instrumenters/uname.pyi b/typeshed/galaxy/jobs/metrics/instrumenters/uname.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/jobs/metrics/instrumenters/uname.pyi
rename to typeshed/galaxy/jobs/metrics/instrumenters/uname.pyi
diff --git a/typeshed/2and3/galaxy/objectstore/__init__.pyi b/typeshed/galaxy/objectstore/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/objectstore/__init__.pyi
rename to typeshed/galaxy/objectstore/__init__.pyi
diff --git a/typeshed/2and3/galaxy/objectstore/azure_blob.pyi b/typeshed/galaxy/objectstore/azure_blob.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/objectstore/azure_blob.pyi
rename to typeshed/galaxy/objectstore/azure_blob.pyi
diff --git a/typeshed/2and3/galaxy/objectstore/pulsar.pyi b/typeshed/galaxy/objectstore/pulsar.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/objectstore/pulsar.pyi
rename to typeshed/galaxy/objectstore/pulsar.pyi
diff --git a/typeshed/2and3/galaxy/objectstore/rods.pyi b/typeshed/galaxy/objectstore/rods.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/objectstore/rods.pyi
rename to typeshed/galaxy/objectstore/rods.pyi
diff --git a/typeshed/2and3/galaxy/objectstore/s3.pyi b/typeshed/galaxy/objectstore/s3.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/objectstore/s3.pyi
rename to typeshed/galaxy/objectstore/s3.pyi
diff --git a/typeshed/2and3/galaxy/objectstore/s3_multipart_upload.pyi b/typeshed/galaxy/objectstore/s3_multipart_upload.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/objectstore/s3_multipart_upload.pyi
rename to typeshed/galaxy/objectstore/s3_multipart_upload.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/__init__.pyi b/typeshed/galaxy/tool_util/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/__init__.pyi
rename to typeshed/galaxy/tool_util/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/cwl/__init__.pyi b/typeshed/galaxy/tool_util/cwl/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/cwl/__init__.pyi
rename to typeshed/galaxy/tool_util/cwl/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/cwl/cwltool_deps.pyi b/typeshed/galaxy/tool_util/cwl/cwltool_deps.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/cwl/cwltool_deps.pyi
rename to typeshed/galaxy/tool_util/cwl/cwltool_deps.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/cwl/parser.pyi b/typeshed/galaxy/tool_util/cwl/parser.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/cwl/parser.pyi
rename to typeshed/galaxy/tool_util/cwl/parser.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/cwl/representation.pyi b/typeshed/galaxy/tool_util/cwl/representation.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/cwl/representation.pyi
rename to typeshed/galaxy/tool_util/cwl/representation.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/cwl/runtime_actions.pyi b/typeshed/galaxy/tool_util/cwl/runtime_actions.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/cwl/runtime_actions.pyi
rename to typeshed/galaxy/tool_util/cwl/runtime_actions.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/cwl/schema.pyi b/typeshed/galaxy/tool_util/cwl/schema.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/cwl/schema.pyi
rename to typeshed/galaxy/tool_util/cwl/schema.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/__init__.pyi b/typeshed/galaxy/tool_util/deps/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/__init__.pyi
rename to typeshed/galaxy/tool_util/deps/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/brew_exts.pyi b/typeshed/galaxy/tool_util/deps/brew_exts.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/brew_exts.pyi
rename to typeshed/galaxy/tool_util/deps/brew_exts.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/brew_util.pyi b/typeshed/galaxy/tool_util/deps/brew_util.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/brew_util.pyi
rename to typeshed/galaxy/tool_util/deps/brew_util.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/commands.pyi b/typeshed/galaxy/tool_util/deps/commands.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/commands.pyi
rename to typeshed/galaxy/tool_util/deps/commands.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/conda_compat.pyi b/typeshed/galaxy/tool_util/deps/conda_compat.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/conda_compat.pyi
rename to typeshed/galaxy/tool_util/deps/conda_compat.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/conda_util.pyi b/typeshed/galaxy/tool_util/deps/conda_util.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/conda_util.pyi
rename to typeshed/galaxy/tool_util/deps/conda_util.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/container_resolvers/__init__.pyi b/typeshed/galaxy/tool_util/deps/container_resolvers/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/container_resolvers/__init__.pyi
rename to typeshed/galaxy/tool_util/deps/container_resolvers/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/container_resolvers/explicit.pyi b/typeshed/galaxy/tool_util/deps/container_resolvers/explicit.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/container_resolvers/explicit.pyi
rename to typeshed/galaxy/tool_util/deps/container_resolvers/explicit.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/container_resolvers/mulled.pyi b/typeshed/galaxy/tool_util/deps/container_resolvers/mulled.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/container_resolvers/mulled.pyi
rename to typeshed/galaxy/tool_util/deps/container_resolvers/mulled.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/containers.pyi b/typeshed/galaxy/tool_util/deps/containers.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/containers.pyi
rename to typeshed/galaxy/tool_util/deps/containers.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/dependencies.pyi b/typeshed/galaxy/tool_util/deps/dependencies.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/dependencies.pyi
rename to typeshed/galaxy/tool_util/deps/dependencies.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/docker_util.pyi b/typeshed/galaxy/tool_util/deps/docker_util.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/docker_util.pyi
rename to typeshed/galaxy/tool_util/deps/docker_util.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/dockerfiles.pyi b/typeshed/galaxy/tool_util/deps/dockerfiles.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/dockerfiles.pyi
rename to typeshed/galaxy/tool_util/deps/dockerfiles.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/installable.pyi b/typeshed/galaxy/tool_util/deps/installable.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/installable.pyi
rename to typeshed/galaxy/tool_util/deps/installable.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/mulled/__init__.pyi b/typeshed/galaxy/tool_util/deps/mulled/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/mulled/__init__.pyi
rename to typeshed/galaxy/tool_util/deps/mulled/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/mulled/_cli.pyi b/typeshed/galaxy/tool_util/deps/mulled/_cli.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/mulled/_cli.pyi
rename to typeshed/galaxy/tool_util/deps/mulled/_cli.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_build.pyi b/typeshed/galaxy/tool_util/deps/mulled/mulled_build.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_build.pyi
rename to typeshed/galaxy/tool_util/deps/mulled/mulled_build.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_build_channel.pyi b/typeshed/galaxy/tool_util/deps/mulled/mulled_build_channel.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_build_channel.pyi
rename to typeshed/galaxy/tool_util/deps/mulled/mulled_build_channel.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_build_files.pyi b/typeshed/galaxy/tool_util/deps/mulled/mulled_build_files.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_build_files.pyi
rename to typeshed/galaxy/tool_util/deps/mulled/mulled_build_files.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_build_tool.pyi b/typeshed/galaxy/tool_util/deps/mulled/mulled_build_tool.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_build_tool.pyi
rename to typeshed/galaxy/tool_util/deps/mulled/mulled_build_tool.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_search.pyi b/typeshed/galaxy/tool_util/deps/mulled/mulled_search.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/mulled/mulled_search.pyi
rename to typeshed/galaxy/tool_util/deps/mulled/mulled_search.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/mulled/util.pyi b/typeshed/galaxy/tool_util/deps/mulled/util.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/mulled/util.pyi
rename to typeshed/galaxy/tool_util/deps/mulled/util.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/requirements.pyi b/typeshed/galaxy/tool_util/deps/requirements.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/requirements.pyi
rename to typeshed/galaxy/tool_util/deps/requirements.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/resolvers/__init__.pyi b/typeshed/galaxy/tool_util/deps/resolvers/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/resolvers/__init__.pyi
rename to typeshed/galaxy/tool_util/deps/resolvers/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/resolvers/brewed_tool_shed_packages.pyi b/typeshed/galaxy/tool_util/deps/resolvers/brewed_tool_shed_packages.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/resolvers/brewed_tool_shed_packages.pyi
rename to typeshed/galaxy/tool_util/deps/resolvers/brewed_tool_shed_packages.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/resolvers/conda.pyi b/typeshed/galaxy/tool_util/deps/resolvers/conda.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/resolvers/conda.pyi
rename to typeshed/galaxy/tool_util/deps/resolvers/conda.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/resolvers/galaxy_packages.pyi b/typeshed/galaxy/tool_util/deps/resolvers/galaxy_packages.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/resolvers/galaxy_packages.pyi
rename to typeshed/galaxy/tool_util/deps/resolvers/galaxy_packages.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/resolvers/homebrew.pyi b/typeshed/galaxy/tool_util/deps/resolvers/homebrew.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/resolvers/homebrew.pyi
rename to typeshed/galaxy/tool_util/deps/resolvers/homebrew.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/resolvers/modules.pyi b/typeshed/galaxy/tool_util/deps/resolvers/modules.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/resolvers/modules.pyi
rename to typeshed/galaxy/tool_util/deps/resolvers/modules.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/resolvers/resolver_mixins.pyi b/typeshed/galaxy/tool_util/deps/resolvers/resolver_mixins.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/resolvers/resolver_mixins.pyi
rename to typeshed/galaxy/tool_util/deps/resolvers/resolver_mixins.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/resolvers/tool_shed_packages.pyi b/typeshed/galaxy/tool_util/deps/resolvers/tool_shed_packages.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/resolvers/tool_shed_packages.pyi
rename to typeshed/galaxy/tool_util/deps/resolvers/tool_shed_packages.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/resolvers/unlinked_tool_shed_packages.pyi b/typeshed/galaxy/tool_util/deps/resolvers/unlinked_tool_shed_packages.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/resolvers/unlinked_tool_shed_packages.pyi
rename to typeshed/galaxy/tool_util/deps/resolvers/unlinked_tool_shed_packages.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/singularity_util.pyi b/typeshed/galaxy/tool_util/deps/singularity_util.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/singularity_util.pyi
rename to typeshed/galaxy/tool_util/deps/singularity_util.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/deps/views.pyi b/typeshed/galaxy/tool_util/deps/views.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/deps/views.pyi
rename to typeshed/galaxy/tool_util/deps/views.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/fetcher.pyi b/typeshed/galaxy/tool_util/fetcher.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/fetcher.pyi
rename to typeshed/galaxy/tool_util/fetcher.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/lint.pyi b/typeshed/galaxy/tool_util/lint.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/lint.pyi
rename to typeshed/galaxy/tool_util/lint.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/lint_util.pyi b/typeshed/galaxy/tool_util/lint_util.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/lint_util.pyi
rename to typeshed/galaxy/tool_util/lint_util.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/__init__.pyi b/typeshed/galaxy/tool_util/linters/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/__init__.pyi
rename to typeshed/galaxy/tool_util/linters/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/citations.pyi b/typeshed/galaxy/tool_util/linters/citations.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/citations.pyi
rename to typeshed/galaxy/tool_util/linters/citations.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/command.pyi b/typeshed/galaxy/tool_util/linters/command.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/command.pyi
rename to typeshed/galaxy/tool_util/linters/command.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/cwl.pyi b/typeshed/galaxy/tool_util/linters/cwl.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/cwl.pyi
rename to typeshed/galaxy/tool_util/linters/cwl.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/general.pyi b/typeshed/galaxy/tool_util/linters/general.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/general.pyi
rename to typeshed/galaxy/tool_util/linters/general.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/help.pyi b/typeshed/galaxy/tool_util/linters/help.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/help.pyi
rename to typeshed/galaxy/tool_util/linters/help.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/inputs.pyi b/typeshed/galaxy/tool_util/linters/inputs.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/inputs.pyi
rename to typeshed/galaxy/tool_util/linters/inputs.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/outputs.pyi b/typeshed/galaxy/tool_util/linters/outputs.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/outputs.pyi
rename to typeshed/galaxy/tool_util/linters/outputs.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/stdio.pyi b/typeshed/galaxy/tool_util/linters/stdio.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/stdio.pyi
rename to typeshed/galaxy/tool_util/linters/stdio.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/tests.pyi b/typeshed/galaxy/tool_util/linters/tests.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/tests.pyi
rename to typeshed/galaxy/tool_util/linters/tests.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/linters/xml_order.pyi b/typeshed/galaxy/tool_util/linters/xml_order.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/linters/xml_order.pyi
rename to typeshed/galaxy/tool_util/linters/xml_order.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/loader.pyi b/typeshed/galaxy/tool_util/loader.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/loader.pyi
rename to typeshed/galaxy/tool_util/loader.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/loader_directory.pyi b/typeshed/galaxy/tool_util/loader_directory.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/loader_directory.pyi
rename to typeshed/galaxy/tool_util/loader_directory.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/locations/__init__.pyi b/typeshed/galaxy/tool_util/locations/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/locations/__init__.pyi
rename to typeshed/galaxy/tool_util/locations/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/locations/dockstore.pyi b/typeshed/galaxy/tool_util/locations/dockstore.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/locations/dockstore.pyi
rename to typeshed/galaxy/tool_util/locations/dockstore.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/locations/file.pyi b/typeshed/galaxy/tool_util/locations/file.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/locations/file.pyi
rename to typeshed/galaxy/tool_util/locations/file.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/locations/http.pyi b/typeshed/galaxy/tool_util/locations/http.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/locations/http.pyi
rename to typeshed/galaxy/tool_util/locations/http.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/__init__.pyi b/typeshed/galaxy/tool_util/parser/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/__init__.pyi
rename to typeshed/galaxy/tool_util/parser/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/cwl.pyi b/typeshed/galaxy/tool_util/parser/cwl.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/cwl.pyi
rename to typeshed/galaxy/tool_util/parser/cwl.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/factory.pyi b/typeshed/galaxy/tool_util/parser/factory.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/factory.pyi
rename to typeshed/galaxy/tool_util/parser/factory.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/interface.pyi b/typeshed/galaxy/tool_util/parser/interface.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/interface.pyi
rename to typeshed/galaxy/tool_util/parser/interface.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/output_actions.pyi b/typeshed/galaxy/tool_util/parser/output_actions.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/output_actions.pyi
rename to typeshed/galaxy/tool_util/parser/output_actions.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/output_collection_def.pyi b/typeshed/galaxy/tool_util/parser/output_collection_def.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/output_collection_def.pyi
rename to typeshed/galaxy/tool_util/parser/output_collection_def.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/output_objects.pyi b/typeshed/galaxy/tool_util/parser/output_objects.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/output_objects.pyi
rename to typeshed/galaxy/tool_util/parser/output_objects.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/util.pyi b/typeshed/galaxy/tool_util/parser/util.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/util.pyi
rename to typeshed/galaxy/tool_util/parser/util.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/xml.pyi b/typeshed/galaxy/tool_util/parser/xml.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/xml.pyi
rename to typeshed/galaxy/tool_util/parser/xml.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/parser/yaml.pyi b/typeshed/galaxy/tool_util/parser/yaml.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/parser/yaml.pyi
rename to typeshed/galaxy/tool_util/parser/yaml.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/verify/__init__.pyi b/typeshed/galaxy/tool_util/verify/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/verify/__init__.pyi
rename to typeshed/galaxy/tool_util/verify/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/verify/asserts/__init__.pyi b/typeshed/galaxy/tool_util/verify/asserts/__init__.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/verify/asserts/__init__.pyi
rename to typeshed/galaxy/tool_util/verify/asserts/__init__.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/verify/asserts/tabular.pyi b/typeshed/galaxy/tool_util/verify/asserts/tabular.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/verify/asserts/tabular.pyi
rename to typeshed/galaxy/tool_util/verify/asserts/tabular.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/verify/asserts/text.pyi b/typeshed/galaxy/tool_util/verify/asserts/text.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/verify/asserts/text.pyi
rename to typeshed/galaxy/tool_util/verify/asserts/text.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/verify/asserts/xml.pyi b/typeshed/galaxy/tool_util/verify/asserts/xml.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/verify/asserts/xml.pyi
rename to typeshed/galaxy/tool_util/verify/asserts/xml.pyi
diff --git a/typeshed/2and3/galaxy/tool_util/verify/test_data.pyi b/typeshed/galaxy/tool_util/verify/test_data.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/tool_util/verify/test_data.pyi
rename to typeshed/galaxy/tool_util/verify/test_data.pyi
diff --git a/typeshed/2and3/galaxy/util/__init__.pyi b/typeshed/galaxy/util/__init__.pyi
similarity index 98%
rename from typeshed/2and3/galaxy/util/__init__.pyi
rename to typeshed/galaxy/util/__init__.pyi
index d2c9a1ee58..d0e0292f3e 100644
--- a/typeshed/2and3/galaxy/util/__init__.pyi
+++ b/typeshed/galaxy/util/__init__.pyi
@@ -4,7 +4,6 @@

from typing import Any, Optional
import collections
-from six.moves.urllib import parse as urlparse, request as urlrequest
from .inflection import English as English, Inflector as Inflector

grp = ... # type: Any
diff --git a/typeshed/2and3/galaxy/util/aliaspickler.pyi b/typeshed/galaxy/util/aliaspickler.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/aliaspickler.pyi
rename to typeshed/galaxy/util/aliaspickler.pyi
diff --git a/typeshed/2and3/galaxy/util/bunch.pyi b/typeshed/galaxy/util/bunch.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/bunch.pyi
rename to typeshed/galaxy/util/bunch.pyi
diff --git a/typeshed/2and3/galaxy/util/checkers.pyi b/typeshed/galaxy/util/checkers.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/checkers.pyi
rename to typeshed/galaxy/util/checkers.pyi
diff --git a/typeshed/2and3/galaxy/util/compression_utils.pyi b/typeshed/galaxy/util/compression_utils.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/compression_utils.pyi
rename to typeshed/galaxy/util/compression_utils.pyi
diff --git a/typeshed/2and3/galaxy/util/dictifiable.pyi b/typeshed/galaxy/util/dictifiable.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/dictifiable.pyi
rename to typeshed/galaxy/util/dictifiable.pyi
diff --git a/typeshed/2and3/galaxy/util/expressions.pyi b/typeshed/galaxy/util/expressions.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/expressions.pyi
rename to typeshed/galaxy/util/expressions.pyi
diff --git a/typeshed/2and3/galaxy/util/filelock.pyi b/typeshed/galaxy/util/filelock.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/filelock.pyi
rename to typeshed/galaxy/util/filelock.pyi
diff --git a/typeshed/2and3/galaxy/util/hash_util.pyi b/typeshed/galaxy/util/hash_util.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/hash_util.pyi
rename to typeshed/galaxy/util/hash_util.pyi
diff --git a/typeshed/2and3/galaxy/util/heartbeat.pyi b/typeshed/galaxy/util/heartbeat.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/heartbeat.pyi
rename to typeshed/galaxy/util/heartbeat.pyi
diff --git a/typeshed/2and3/galaxy/util/image_util.pyi b/typeshed/galaxy/util/image_util.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/image_util.pyi
rename to typeshed/galaxy/util/image_util.pyi
diff --git a/typeshed/2and3/galaxy/util/inflection.pyi b/typeshed/galaxy/util/inflection.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/inflection.pyi
rename to typeshed/galaxy/util/inflection.pyi
diff --git a/typeshed/2and3/galaxy/util/json.pyi b/typeshed/galaxy/util/json.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/json.pyi
rename to typeshed/galaxy/util/json.pyi
diff --git a/typeshed/2and3/galaxy/util/lazy_process.pyi b/typeshed/galaxy/util/lazy_process.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/lazy_process.pyi
rename to typeshed/galaxy/util/lazy_process.pyi
diff --git a/typeshed/2and3/galaxy/util/object_wrapper.pyi b/typeshed/galaxy/util/object_wrapper.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/object_wrapper.pyi
rename to typeshed/galaxy/util/object_wrapper.pyi
diff --git a/typeshed/2and3/galaxy/util/odict.pyi b/typeshed/galaxy/util/odict.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/odict.pyi
rename to typeshed/galaxy/util/odict.pyi
diff --git a/typeshed/2and3/galaxy/util/oset.pyi b/typeshed/galaxy/util/oset.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/oset.pyi
rename to typeshed/galaxy/util/oset.pyi
diff --git a/typeshed/2and3/galaxy/util/plugin_config.pyi b/typeshed/galaxy/util/plugin_config.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/plugin_config.pyi
rename to typeshed/galaxy/util/plugin_config.pyi
diff --git a/typeshed/2and3/galaxy/util/properties.pyi b/typeshed/galaxy/util/properties.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/properties.pyi
rename to typeshed/galaxy/util/properties.pyi
diff --git a/typeshed/2and3/galaxy/util/simplegraph.pyi b/typeshed/galaxy/util/simplegraph.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/simplegraph.pyi
rename to typeshed/galaxy/util/simplegraph.pyi
diff --git a/typeshed/2and3/galaxy/util/sleeper.pyi b/typeshed/galaxy/util/sleeper.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/sleeper.pyi
rename to typeshed/galaxy/util/sleeper.pyi
diff --git a/typeshed/2and3/galaxy/util/sockets.pyi b/typeshed/galaxy/util/sockets.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/sockets.pyi
rename to typeshed/galaxy/util/sockets.pyi
diff --git a/typeshed/2and3/galaxy/util/specs.pyi b/typeshed/galaxy/util/specs.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/specs.pyi
rename to typeshed/galaxy/util/specs.pyi
diff --git a/typeshed/2and3/galaxy/util/sqlite.pyi b/typeshed/galaxy/util/sqlite.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/sqlite.pyi
rename to typeshed/galaxy/util/sqlite.pyi
diff --git a/typeshed/2and3/galaxy/util/submodules.pyi b/typeshed/galaxy/util/submodules.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/submodules.pyi
rename to typeshed/galaxy/util/submodules.pyi
diff --git a/typeshed/2and3/galaxy/util/topsort.pyi b/typeshed/galaxy/util/topsort.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/topsort.pyi
rename to typeshed/galaxy/util/topsort.pyi
diff --git a/typeshed/2and3/galaxy/util/xml_macros.pyi b/typeshed/galaxy/util/xml_macros.pyi
similarity index 100%
rename from typeshed/2and3/galaxy/util/xml_macros.pyi
rename to typeshed/galaxy/util/xml_macros.pyi
diff --git a/typeshed/2and3/graphviz/__init__.pyi b/typeshed/graphviz/__init__.pyi
similarity index 100%
rename from typeshed/2and3/graphviz/__init__.pyi
rename to typeshed/graphviz/__init__.pyi
diff --git a/typeshed/2and3/graphviz/_compat.pyi b/typeshed/graphviz/_compat.pyi
similarity index 100%
rename from typeshed/2and3/graphviz/_compat.pyi
rename to typeshed/graphviz/_compat.pyi
diff --git a/typeshed/2and3/graphviz/backend.pyi b/typeshed/graphviz/backend.pyi
similarity index 100%
rename from typeshed/2and3/graphviz/backend.pyi
rename to typeshed/graphviz/backend.pyi
diff --git a/typeshed/2and3/graphviz/dot.pyi b/typeshed/graphviz/dot.pyi
similarity index 100%
rename from typeshed/2and3/graphviz/dot.pyi
rename to typeshed/graphviz/dot.pyi
diff --git a/typeshed/2and3/graphviz/files.pyi b/typeshed/graphviz/files.pyi
similarity index 100%
rename from typeshed/2and3/graphviz/files.pyi
rename to typeshed/graphviz/files.pyi
diff --git a/typeshed/2and3/graphviz/lang.pyi b/typeshed/graphviz/lang.pyi
similarity index 100%
rename from typeshed/2and3/graphviz/lang.pyi
rename to typeshed/graphviz/lang.pyi
diff --git a/typeshed/2and3/graphviz/tools.pyi b/typeshed/graphviz/tools.pyi
similarity index 100%
rename from typeshed/2and3/graphviz/tools.pyi
rename to typeshed/graphviz/tools.pyi
diff --git a/typeshed/2and3/mistune.pyi b/typeshed/mistune.pyi
similarity index 100%
rename from typeshed/2and3/mistune.pyi
rename to typeshed/mistune.pyi
diff --git a/typeshed/2and3/networkx/__init__.pyi b/typeshed/networkx/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/__init__.pyi
rename to typeshed/networkx/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/__init__.pyi b/typeshed/networkx/algorithms/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/__init__.pyi
rename to typeshed/networkx/algorithms/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/__init__.pyi b/typeshed/networkx/algorithms/approximation/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/__init__.pyi
rename to typeshed/networkx/algorithms/approximation/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/clique.pyi b/typeshed/networkx/algorithms/approximation/clique.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/clique.pyi
rename to typeshed/networkx/algorithms/approximation/clique.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/clustering_coefficient.pyi b/typeshed/networkx/algorithms/approximation/clustering_coefficient.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/clustering_coefficient.pyi
rename to typeshed/networkx/algorithms/approximation/clustering_coefficient.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/connectivity.pyi b/typeshed/networkx/algorithms/approximation/connectivity.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/connectivity.pyi
rename to typeshed/networkx/algorithms/approximation/connectivity.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/dominating_set.pyi b/typeshed/networkx/algorithms/approximation/dominating_set.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/dominating_set.pyi
rename to typeshed/networkx/algorithms/approximation/dominating_set.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/independent_set.pyi b/typeshed/networkx/algorithms/approximation/independent_set.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/independent_set.pyi
rename to typeshed/networkx/algorithms/approximation/independent_set.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/kcomponents.pyi b/typeshed/networkx/algorithms/approximation/kcomponents.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/kcomponents.pyi
rename to typeshed/networkx/algorithms/approximation/kcomponents.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/matching.pyi b/typeshed/networkx/algorithms/approximation/matching.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/matching.pyi
rename to typeshed/networkx/algorithms/approximation/matching.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/ramsey.pyi b/typeshed/networkx/algorithms/approximation/ramsey.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/ramsey.pyi
rename to typeshed/networkx/algorithms/approximation/ramsey.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/steinertree.pyi b/typeshed/networkx/algorithms/approximation/steinertree.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/steinertree.pyi
rename to typeshed/networkx/algorithms/approximation/steinertree.pyi
diff --git a/typeshed/2and3/networkx/algorithms/approximation/vertex_cover.pyi b/typeshed/networkx/algorithms/approximation/vertex_cover.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/approximation/vertex_cover.pyi
rename to typeshed/networkx/algorithms/approximation/vertex_cover.pyi
diff --git a/typeshed/2and3/networkx/algorithms/assortativity/__init__.pyi b/typeshed/networkx/algorithms/assortativity/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/assortativity/__init__.pyi
rename to typeshed/networkx/algorithms/assortativity/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/assortativity/connectivity.pyi b/typeshed/networkx/algorithms/assortativity/connectivity.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/assortativity/connectivity.pyi
rename to typeshed/networkx/algorithms/assortativity/connectivity.pyi
diff --git a/typeshed/2and3/networkx/algorithms/assortativity/correlation.pyi b/typeshed/networkx/algorithms/assortativity/correlation.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/assortativity/correlation.pyi
rename to typeshed/networkx/algorithms/assortativity/correlation.pyi
diff --git a/typeshed/2and3/networkx/algorithms/assortativity/mixing.pyi b/typeshed/networkx/algorithms/assortativity/mixing.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/assortativity/mixing.pyi
rename to typeshed/networkx/algorithms/assortativity/mixing.pyi
diff --git a/typeshed/2and3/networkx/algorithms/assortativity/neighbor_degree.pyi b/typeshed/networkx/algorithms/assortativity/neighbor_degree.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/assortativity/neighbor_degree.pyi
rename to typeshed/networkx/algorithms/assortativity/neighbor_degree.pyi
diff --git a/typeshed/2and3/networkx/algorithms/assortativity/pairs.pyi b/typeshed/networkx/algorithms/assortativity/pairs.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/assortativity/pairs.pyi
rename to typeshed/networkx/algorithms/assortativity/pairs.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/__init__.pyi b/typeshed/networkx/algorithms/bipartite/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/__init__.pyi
rename to typeshed/networkx/algorithms/bipartite/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/basic.pyi b/typeshed/networkx/algorithms/bipartite/basic.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/basic.pyi
rename to typeshed/networkx/algorithms/bipartite/basic.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/centrality.pyi b/typeshed/networkx/algorithms/bipartite/centrality.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/centrality.pyi
rename to typeshed/networkx/algorithms/bipartite/centrality.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/cluster.pyi b/typeshed/networkx/algorithms/bipartite/cluster.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/cluster.pyi
rename to typeshed/networkx/algorithms/bipartite/cluster.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/covering.pyi b/typeshed/networkx/algorithms/bipartite/covering.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/covering.pyi
rename to typeshed/networkx/algorithms/bipartite/covering.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/edgelist.pyi b/typeshed/networkx/algorithms/bipartite/edgelist.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/edgelist.pyi
rename to typeshed/networkx/algorithms/bipartite/edgelist.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/generators.pyi b/typeshed/networkx/algorithms/bipartite/generators.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/generators.pyi
rename to typeshed/networkx/algorithms/bipartite/generators.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/matching.pyi b/typeshed/networkx/algorithms/bipartite/matching.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/matching.pyi
rename to typeshed/networkx/algorithms/bipartite/matching.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/matrix.pyi b/typeshed/networkx/algorithms/bipartite/matrix.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/matrix.pyi
rename to typeshed/networkx/algorithms/bipartite/matrix.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/projection.pyi b/typeshed/networkx/algorithms/bipartite/projection.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/projection.pyi
rename to typeshed/networkx/algorithms/bipartite/projection.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/redundancy.pyi b/typeshed/networkx/algorithms/bipartite/redundancy.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/redundancy.pyi
rename to typeshed/networkx/algorithms/bipartite/redundancy.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bipartite/spectral.pyi b/typeshed/networkx/algorithms/bipartite/spectral.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bipartite/spectral.pyi
rename to typeshed/networkx/algorithms/bipartite/spectral.pyi
diff --git a/typeshed/2and3/networkx/algorithms/boundary.pyi b/typeshed/networkx/algorithms/boundary.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/boundary.pyi
rename to typeshed/networkx/algorithms/boundary.pyi
diff --git a/typeshed/2and3/networkx/algorithms/bridges.pyi b/typeshed/networkx/algorithms/bridges.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/bridges.pyi
rename to typeshed/networkx/algorithms/bridges.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/betweenness.pyi b/typeshed/networkx/algorithms/centrality/betweenness.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/betweenness.pyi
rename to typeshed/networkx/algorithms/centrality/betweenness.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/betweenness_subset.pyi b/typeshed/networkx/algorithms/centrality/betweenness_subset.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/betweenness_subset.pyi
rename to typeshed/networkx/algorithms/centrality/betweenness_subset.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/closeness.pyi b/typeshed/networkx/algorithms/centrality/closeness.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/closeness.pyi
rename to typeshed/networkx/algorithms/centrality/closeness.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/current_flow_betweenness.pyi b/typeshed/networkx/algorithms/centrality/current_flow_betweenness.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/current_flow_betweenness.pyi
rename to typeshed/networkx/algorithms/centrality/current_flow_betweenness.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/current_flow_betweenness_subset.pyi b/typeshed/networkx/algorithms/centrality/current_flow_betweenness_subset.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/current_flow_betweenness_subset.pyi
rename to typeshed/networkx/algorithms/centrality/current_flow_betweenness_subset.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/current_flow_closeness.pyi b/typeshed/networkx/algorithms/centrality/current_flow_closeness.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/current_flow_closeness.pyi
rename to typeshed/networkx/algorithms/centrality/current_flow_closeness.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/degree_alg.pyi b/typeshed/networkx/algorithms/centrality/degree_alg.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/degree_alg.pyi
rename to typeshed/networkx/algorithms/centrality/degree_alg.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/dispersion.pyi b/typeshed/networkx/algorithms/centrality/dispersion.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/dispersion.pyi
rename to typeshed/networkx/algorithms/centrality/dispersion.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/eigenvector.pyi b/typeshed/networkx/algorithms/centrality/eigenvector.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/eigenvector.pyi
rename to typeshed/networkx/algorithms/centrality/eigenvector.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/flow_matrix.pyi b/typeshed/networkx/algorithms/centrality/flow_matrix.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/flow_matrix.pyi
rename to typeshed/networkx/algorithms/centrality/flow_matrix.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/harmonic.pyi b/typeshed/networkx/algorithms/centrality/harmonic.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/harmonic.pyi
rename to typeshed/networkx/algorithms/centrality/harmonic.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/katz.pyi b/typeshed/networkx/algorithms/centrality/katz.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/katz.pyi
rename to typeshed/networkx/algorithms/centrality/katz.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/load.pyi b/typeshed/networkx/algorithms/centrality/load.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/load.pyi
rename to typeshed/networkx/algorithms/centrality/load.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/reaching.pyi b/typeshed/networkx/algorithms/centrality/reaching.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/reaching.pyi
rename to typeshed/networkx/algorithms/centrality/reaching.pyi
diff --git a/typeshed/2and3/networkx/algorithms/centrality/subgraph_alg.pyi b/typeshed/networkx/algorithms/centrality/subgraph_alg.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/centrality/subgraph_alg.pyi
rename to typeshed/networkx/algorithms/centrality/subgraph_alg.pyi
diff --git a/typeshed/2and3/networkx/algorithms/chains.pyi b/typeshed/networkx/algorithms/chains.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/chains.pyi
rename to typeshed/networkx/algorithms/chains.pyi
diff --git a/typeshed/2and3/networkx/algorithms/chordal.pyi b/typeshed/networkx/algorithms/chordal.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/chordal.pyi
rename to typeshed/networkx/algorithms/chordal.pyi
diff --git a/typeshed/2and3/networkx/algorithms/clique.pyi b/typeshed/networkx/algorithms/clique.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/clique.pyi
rename to typeshed/networkx/algorithms/clique.pyi
diff --git a/typeshed/2and3/networkx/algorithms/cluster.pyi b/typeshed/networkx/algorithms/cluster.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/cluster.pyi
rename to typeshed/networkx/algorithms/cluster.pyi
diff --git a/typeshed/2and3/networkx/algorithms/coloring/__init__.pyi b/typeshed/networkx/algorithms/coloring/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/coloring/__init__.pyi
rename to typeshed/networkx/algorithms/coloring/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/coloring/greedy_coloring.pyi b/typeshed/networkx/algorithms/coloring/greedy_coloring.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/coloring/greedy_coloring.pyi
rename to typeshed/networkx/algorithms/coloring/greedy_coloring.pyi
diff --git a/typeshed/2and3/networkx/algorithms/coloring/greedy_coloring_with_interchange.pyi b/typeshed/networkx/algorithms/coloring/greedy_coloring_with_interchange.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/coloring/greedy_coloring_with_interchange.pyi
rename to typeshed/networkx/algorithms/coloring/greedy_coloring_with_interchange.pyi
diff --git a/typeshed/2and3/networkx/algorithms/communicability_alg.pyi b/typeshed/networkx/algorithms/communicability_alg.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/communicability_alg.pyi
rename to typeshed/networkx/algorithms/communicability_alg.pyi
diff --git a/typeshed/2and3/networkx/algorithms/community/__init__.pyi b/typeshed/networkx/algorithms/community/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/community/__init__.pyi
rename to typeshed/networkx/algorithms/community/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/community/asyn_fluidc.pyi b/typeshed/networkx/algorithms/community/asyn_fluidc.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/community/asyn_fluidc.pyi
rename to typeshed/networkx/algorithms/community/asyn_fluidc.pyi
diff --git a/typeshed/2and3/networkx/algorithms/community/centrality.pyi b/typeshed/networkx/algorithms/community/centrality.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/community/centrality.pyi
rename to typeshed/networkx/algorithms/community/centrality.pyi
diff --git a/typeshed/2and3/networkx/algorithms/community/community_generators.pyi b/typeshed/networkx/algorithms/community/community_generators.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/community/community_generators.pyi
rename to typeshed/networkx/algorithms/community/community_generators.pyi
diff --git a/typeshed/2and3/networkx/algorithms/community/community_utils.pyi b/typeshed/networkx/algorithms/community/community_utils.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/community/community_utils.pyi
rename to typeshed/networkx/algorithms/community/community_utils.pyi
diff --git a/typeshed/2and3/networkx/algorithms/community/kclique.pyi b/typeshed/networkx/algorithms/community/kclique.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/community/kclique.pyi
rename to typeshed/networkx/algorithms/community/kclique.pyi
diff --git a/typeshed/2and3/networkx/algorithms/community/kernighan_lin.pyi b/typeshed/networkx/algorithms/community/kernighan_lin.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/community/kernighan_lin.pyi
rename to typeshed/networkx/algorithms/community/kernighan_lin.pyi
diff --git a/typeshed/2and3/networkx/algorithms/community/label_propagation.pyi b/typeshed/networkx/algorithms/community/label_propagation.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/community/label_propagation.pyi
rename to typeshed/networkx/algorithms/community/label_propagation.pyi
diff --git a/typeshed/2and3/networkx/algorithms/community/quality.pyi b/typeshed/networkx/algorithms/community/quality.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/community/quality.pyi
rename to typeshed/networkx/algorithms/community/quality.pyi
diff --git a/typeshed/2and3/networkx/algorithms/components/__init__.pyi b/typeshed/networkx/algorithms/components/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/components/__init__.pyi
rename to typeshed/networkx/algorithms/components/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/components/attracting.pyi b/typeshed/networkx/algorithms/components/attracting.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/components/attracting.pyi
rename to typeshed/networkx/algorithms/components/attracting.pyi
diff --git a/typeshed/2and3/networkx/algorithms/components/biconnected.pyi b/typeshed/networkx/algorithms/components/biconnected.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/components/biconnected.pyi
rename to typeshed/networkx/algorithms/components/biconnected.pyi
diff --git a/typeshed/2and3/networkx/algorithms/components/connected.pyi b/typeshed/networkx/algorithms/components/connected.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/components/connected.pyi
rename to typeshed/networkx/algorithms/components/connected.pyi
diff --git a/typeshed/2and3/networkx/algorithms/components/semiconnected.pyi b/typeshed/networkx/algorithms/components/semiconnected.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/components/semiconnected.pyi
rename to typeshed/networkx/algorithms/components/semiconnected.pyi
diff --git a/typeshed/2and3/networkx/algorithms/components/strongly_connected.pyi b/typeshed/networkx/algorithms/components/strongly_connected.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/components/strongly_connected.pyi
rename to typeshed/networkx/algorithms/components/strongly_connected.pyi
diff --git a/typeshed/2and3/networkx/algorithms/components/weakly_connected.pyi b/typeshed/networkx/algorithms/components/weakly_connected.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/components/weakly_connected.pyi
rename to typeshed/networkx/algorithms/components/weakly_connected.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/__init__.pyi b/typeshed/networkx/algorithms/connectivity/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/__init__.pyi
rename to typeshed/networkx/algorithms/connectivity/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/connectivity.pyi b/typeshed/networkx/algorithms/connectivity/connectivity.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/connectivity.pyi
rename to typeshed/networkx/algorithms/connectivity/connectivity.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/cuts.pyi b/typeshed/networkx/algorithms/connectivity/cuts.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/cuts.pyi
rename to typeshed/networkx/algorithms/connectivity/cuts.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/disjoint_paths.pyi b/typeshed/networkx/algorithms/connectivity/disjoint_paths.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/disjoint_paths.pyi
rename to typeshed/networkx/algorithms/connectivity/disjoint_paths.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/edge_augmentation.pyi b/typeshed/networkx/algorithms/connectivity/edge_augmentation.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/edge_augmentation.pyi
rename to typeshed/networkx/algorithms/connectivity/edge_augmentation.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/edge_kcomponents.pyi b/typeshed/networkx/algorithms/connectivity/edge_kcomponents.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/edge_kcomponents.pyi
rename to typeshed/networkx/algorithms/connectivity/edge_kcomponents.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/kcomponents.pyi b/typeshed/networkx/algorithms/connectivity/kcomponents.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/kcomponents.pyi
rename to typeshed/networkx/algorithms/connectivity/kcomponents.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/kcutsets.pyi b/typeshed/networkx/algorithms/connectivity/kcutsets.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/kcutsets.pyi
rename to typeshed/networkx/algorithms/connectivity/kcutsets.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/stoerwagner.pyi b/typeshed/networkx/algorithms/connectivity/stoerwagner.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/stoerwagner.pyi
rename to typeshed/networkx/algorithms/connectivity/stoerwagner.pyi
diff --git a/typeshed/2and3/networkx/algorithms/connectivity/utils.pyi b/typeshed/networkx/algorithms/connectivity/utils.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/connectivity/utils.pyi
rename to typeshed/networkx/algorithms/connectivity/utils.pyi
diff --git a/typeshed/2and3/networkx/algorithms/core.pyi b/typeshed/networkx/algorithms/core.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/core.pyi
rename to typeshed/networkx/algorithms/core.pyi
diff --git a/typeshed/2and3/networkx/algorithms/covering.pyi b/typeshed/networkx/algorithms/covering.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/covering.pyi
rename to typeshed/networkx/algorithms/covering.pyi
diff --git a/typeshed/2and3/networkx/algorithms/cuts.pyi b/typeshed/networkx/algorithms/cuts.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/cuts.pyi
rename to typeshed/networkx/algorithms/cuts.pyi
diff --git a/typeshed/2and3/networkx/algorithms/cycles.pyi b/typeshed/networkx/algorithms/cycles.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/cycles.pyi
rename to typeshed/networkx/algorithms/cycles.pyi
diff --git a/typeshed/2and3/networkx/algorithms/dag.pyi b/typeshed/networkx/algorithms/dag.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/dag.pyi
rename to typeshed/networkx/algorithms/dag.pyi
diff --git a/typeshed/2and3/networkx/algorithms/distance_measures.pyi b/typeshed/networkx/algorithms/distance_measures.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/distance_measures.pyi
rename to typeshed/networkx/algorithms/distance_measures.pyi
diff --git a/typeshed/2and3/networkx/algorithms/distance_regular.pyi b/typeshed/networkx/algorithms/distance_regular.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/distance_regular.pyi
rename to typeshed/networkx/algorithms/distance_regular.pyi
diff --git a/typeshed/2and3/networkx/algorithms/dominance.pyi b/typeshed/networkx/algorithms/dominance.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/dominance.pyi
rename to typeshed/networkx/algorithms/dominance.pyi
diff --git a/typeshed/2and3/networkx/algorithms/dominating.pyi b/typeshed/networkx/algorithms/dominating.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/dominating.pyi
rename to typeshed/networkx/algorithms/dominating.pyi
diff --git a/typeshed/2and3/networkx/algorithms/efficiency.pyi b/typeshed/networkx/algorithms/efficiency.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/efficiency.pyi
rename to typeshed/networkx/algorithms/efficiency.pyi
diff --git a/typeshed/2and3/networkx/algorithms/euler.pyi b/typeshed/networkx/algorithms/euler.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/euler.pyi
rename to typeshed/networkx/algorithms/euler.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/__init__.pyi b/typeshed/networkx/algorithms/flow/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/__init__.pyi
rename to typeshed/networkx/algorithms/flow/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/boykovkolmogorov.pyi b/typeshed/networkx/algorithms/flow/boykovkolmogorov.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/boykovkolmogorov.pyi
rename to typeshed/networkx/algorithms/flow/boykovkolmogorov.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/capacityscaling.pyi b/typeshed/networkx/algorithms/flow/capacityscaling.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/capacityscaling.pyi
rename to typeshed/networkx/algorithms/flow/capacityscaling.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/dinitz_alg.pyi b/typeshed/networkx/algorithms/flow/dinitz_alg.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/dinitz_alg.pyi
rename to typeshed/networkx/algorithms/flow/dinitz_alg.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/edmondskarp.pyi b/typeshed/networkx/algorithms/flow/edmondskarp.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/edmondskarp.pyi
rename to typeshed/networkx/algorithms/flow/edmondskarp.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/gomory_hu.pyi b/typeshed/networkx/algorithms/flow/gomory_hu.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/gomory_hu.pyi
rename to typeshed/networkx/algorithms/flow/gomory_hu.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/maxflow.pyi b/typeshed/networkx/algorithms/flow/maxflow.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/maxflow.pyi
rename to typeshed/networkx/algorithms/flow/maxflow.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/mincost.pyi b/typeshed/networkx/algorithms/flow/mincost.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/mincost.pyi
rename to typeshed/networkx/algorithms/flow/mincost.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/networksimplex.pyi b/typeshed/networkx/algorithms/flow/networksimplex.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/networksimplex.pyi
rename to typeshed/networkx/algorithms/flow/networksimplex.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/preflowpush.pyi b/typeshed/networkx/algorithms/flow/preflowpush.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/preflowpush.pyi
rename to typeshed/networkx/algorithms/flow/preflowpush.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/shortestaugmentingpath.pyi b/typeshed/networkx/algorithms/flow/shortestaugmentingpath.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/shortestaugmentingpath.pyi
rename to typeshed/networkx/algorithms/flow/shortestaugmentingpath.pyi
diff --git a/typeshed/2and3/networkx/algorithms/flow/utils.pyi b/typeshed/networkx/algorithms/flow/utils.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/flow/utils.pyi
rename to typeshed/networkx/algorithms/flow/utils.pyi
diff --git a/typeshed/2and3/networkx/algorithms/graphical.pyi b/typeshed/networkx/algorithms/graphical.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/graphical.pyi
rename to typeshed/networkx/algorithms/graphical.pyi
diff --git a/typeshed/2and3/networkx/algorithms/hierarchy.pyi b/typeshed/networkx/algorithms/hierarchy.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/hierarchy.pyi
rename to typeshed/networkx/algorithms/hierarchy.pyi
diff --git a/typeshed/2and3/networkx/algorithms/hybrid.pyi b/typeshed/networkx/algorithms/hybrid.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/hybrid.pyi
rename to typeshed/networkx/algorithms/hybrid.pyi
diff --git a/typeshed/2and3/networkx/algorithms/isolate.pyi b/typeshed/networkx/algorithms/isolate.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/isolate.pyi
rename to typeshed/networkx/algorithms/isolate.pyi
diff --git a/typeshed/2and3/networkx/algorithms/isomorphism/__init__.pyi b/typeshed/networkx/algorithms/isomorphism/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/isomorphism/__init__.pyi
rename to typeshed/networkx/algorithms/isomorphism/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/isomorphism/isomorph.pyi b/typeshed/networkx/algorithms/isomorphism/isomorph.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/isomorphism/isomorph.pyi
rename to typeshed/networkx/algorithms/isomorphism/isomorph.pyi
diff --git a/typeshed/2and3/networkx/algorithms/isomorphism/isomorphvf2.pyi b/typeshed/networkx/algorithms/isomorphism/isomorphvf2.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/isomorphism/isomorphvf2.pyi
rename to typeshed/networkx/algorithms/isomorphism/isomorphvf2.pyi
diff --git a/typeshed/2and3/networkx/algorithms/isomorphism/matchhelpers.pyi b/typeshed/networkx/algorithms/isomorphism/matchhelpers.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/isomorphism/matchhelpers.pyi
rename to typeshed/networkx/algorithms/isomorphism/matchhelpers.pyi
diff --git a/typeshed/2and3/networkx/algorithms/isomorphism/temporalisomorphvf2.pyi b/typeshed/networkx/algorithms/isomorphism/temporalisomorphvf2.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/isomorphism/temporalisomorphvf2.pyi
rename to typeshed/networkx/algorithms/isomorphism/temporalisomorphvf2.pyi
diff --git a/typeshed/2and3/networkx/algorithms/isomorphism/vf2userfunc.pyi b/typeshed/networkx/algorithms/isomorphism/vf2userfunc.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/isomorphism/vf2userfunc.pyi
rename to typeshed/networkx/algorithms/isomorphism/vf2userfunc.pyi
diff --git a/typeshed/2and3/networkx/algorithms/link_analysis/__init__.pyi b/typeshed/networkx/algorithms/link_analysis/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/link_analysis/__init__.pyi
rename to typeshed/networkx/algorithms/link_analysis/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/link_analysis/hits_alg.pyi b/typeshed/networkx/algorithms/link_analysis/hits_alg.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/link_analysis/hits_alg.pyi
rename to typeshed/networkx/algorithms/link_analysis/hits_alg.pyi
diff --git a/typeshed/2and3/networkx/algorithms/link_analysis/pagerank_alg.pyi b/typeshed/networkx/algorithms/link_analysis/pagerank_alg.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/link_analysis/pagerank_alg.pyi
rename to typeshed/networkx/algorithms/link_analysis/pagerank_alg.pyi
diff --git a/typeshed/2and3/networkx/algorithms/link_prediction.pyi b/typeshed/networkx/algorithms/link_prediction.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/link_prediction.pyi
rename to typeshed/networkx/algorithms/link_prediction.pyi
diff --git a/typeshed/2and3/networkx/algorithms/lowest_common_ancestors.pyi b/typeshed/networkx/algorithms/lowest_common_ancestors.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/lowest_common_ancestors.pyi
rename to typeshed/networkx/algorithms/lowest_common_ancestors.pyi
diff --git a/typeshed/2and3/networkx/algorithms/matching.pyi b/typeshed/networkx/algorithms/matching.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/matching.pyi
rename to typeshed/networkx/algorithms/matching.pyi
diff --git a/typeshed/2and3/networkx/algorithms/minors.pyi b/typeshed/networkx/algorithms/minors.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/minors.pyi
rename to typeshed/networkx/algorithms/minors.pyi
diff --git a/typeshed/2and3/networkx/algorithms/mis.pyi b/typeshed/networkx/algorithms/mis.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/mis.pyi
rename to typeshed/networkx/algorithms/mis.pyi
diff --git a/typeshed/2and3/networkx/algorithms/operators/__init__.pyi b/typeshed/networkx/algorithms/operators/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/operators/__init__.pyi
rename to typeshed/networkx/algorithms/operators/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/operators/all.pyi b/typeshed/networkx/algorithms/operators/all.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/operators/all.pyi
rename to typeshed/networkx/algorithms/operators/all.pyi
diff --git a/typeshed/2and3/networkx/algorithms/operators/binary.pyi b/typeshed/networkx/algorithms/operators/binary.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/operators/binary.pyi
rename to typeshed/networkx/algorithms/operators/binary.pyi
diff --git a/typeshed/2and3/networkx/algorithms/operators/product.pyi b/typeshed/networkx/algorithms/operators/product.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/operators/product.pyi
rename to typeshed/networkx/algorithms/operators/product.pyi
diff --git a/typeshed/2and3/networkx/algorithms/operators/unary.pyi b/typeshed/networkx/algorithms/operators/unary.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/operators/unary.pyi
rename to typeshed/networkx/algorithms/operators/unary.pyi
diff --git a/typeshed/2and3/networkx/algorithms/reciprocity.pyi b/typeshed/networkx/algorithms/reciprocity.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/reciprocity.pyi
rename to typeshed/networkx/algorithms/reciprocity.pyi
diff --git a/typeshed/2and3/networkx/algorithms/richclub.pyi b/typeshed/networkx/algorithms/richclub.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/richclub.pyi
rename to typeshed/networkx/algorithms/richclub.pyi
diff --git a/typeshed/2and3/networkx/algorithms/shortest_paths/__init__.pyi b/typeshed/networkx/algorithms/shortest_paths/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/shortest_paths/__init__.pyi
rename to typeshed/networkx/algorithms/shortest_paths/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/shortest_paths/astar.pyi b/typeshed/networkx/algorithms/shortest_paths/astar.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/shortest_paths/astar.pyi
rename to typeshed/networkx/algorithms/shortest_paths/astar.pyi
diff --git a/typeshed/2and3/networkx/algorithms/shortest_paths/dense.pyi b/typeshed/networkx/algorithms/shortest_paths/dense.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/shortest_paths/dense.pyi
rename to typeshed/networkx/algorithms/shortest_paths/dense.pyi
diff --git a/typeshed/2and3/networkx/algorithms/shortest_paths/generic.pyi b/typeshed/networkx/algorithms/shortest_paths/generic.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/shortest_paths/generic.pyi
rename to typeshed/networkx/algorithms/shortest_paths/generic.pyi
diff --git a/typeshed/2and3/networkx/algorithms/shortest_paths/unweighted.pyi b/typeshed/networkx/algorithms/shortest_paths/unweighted.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/shortest_paths/unweighted.pyi
rename to typeshed/networkx/algorithms/shortest_paths/unweighted.pyi
diff --git a/typeshed/2and3/networkx/algorithms/shortest_paths/weighted.pyi b/typeshed/networkx/algorithms/shortest_paths/weighted.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/shortest_paths/weighted.pyi
rename to typeshed/networkx/algorithms/shortest_paths/weighted.pyi
diff --git a/typeshed/2and3/networkx/algorithms/similarity.pyi b/typeshed/networkx/algorithms/similarity.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/similarity.pyi
rename to typeshed/networkx/algorithms/similarity.pyi
diff --git a/typeshed/2and3/networkx/algorithms/simple_paths.pyi b/typeshed/networkx/algorithms/simple_paths.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/simple_paths.pyi
rename to typeshed/networkx/algorithms/simple_paths.pyi
diff --git a/typeshed/2and3/networkx/algorithms/smetric.pyi b/typeshed/networkx/algorithms/smetric.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/smetric.pyi
rename to typeshed/networkx/algorithms/smetric.pyi
diff --git a/typeshed/2and3/networkx/algorithms/structuralholes.pyi b/typeshed/networkx/algorithms/structuralholes.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/structuralholes.pyi
rename to typeshed/networkx/algorithms/structuralholes.pyi
diff --git a/typeshed/2and3/networkx/algorithms/swap.pyi b/typeshed/networkx/algorithms/swap.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/swap.pyi
rename to typeshed/networkx/algorithms/swap.pyi
diff --git a/typeshed/2and3/networkx/algorithms/threshold.pyi b/typeshed/networkx/algorithms/threshold.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/threshold.pyi
rename to typeshed/networkx/algorithms/threshold.pyi
diff --git a/typeshed/2and3/networkx/algorithms/tournament.pyi b/typeshed/networkx/algorithms/tournament.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/tournament.pyi
rename to typeshed/networkx/algorithms/tournament.pyi
diff --git a/typeshed/2and3/networkx/algorithms/traversal/__init__.pyi b/typeshed/networkx/algorithms/traversal/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/traversal/__init__.pyi
rename to typeshed/networkx/algorithms/traversal/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/traversal/beamsearch.pyi b/typeshed/networkx/algorithms/traversal/beamsearch.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/traversal/beamsearch.pyi
rename to typeshed/networkx/algorithms/traversal/beamsearch.pyi
diff --git a/typeshed/2and3/networkx/algorithms/traversal/breadth_first_search.pyi b/typeshed/networkx/algorithms/traversal/breadth_first_search.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/traversal/breadth_first_search.pyi
rename to typeshed/networkx/algorithms/traversal/breadth_first_search.pyi
diff --git a/typeshed/2and3/networkx/algorithms/traversal/depth_first_search.pyi b/typeshed/networkx/algorithms/traversal/depth_first_search.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/traversal/depth_first_search.pyi
rename to typeshed/networkx/algorithms/traversal/depth_first_search.pyi
diff --git a/typeshed/2and3/networkx/algorithms/traversal/edgedfs.pyi b/typeshed/networkx/algorithms/traversal/edgedfs.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/traversal/edgedfs.pyi
rename to typeshed/networkx/algorithms/traversal/edgedfs.pyi
diff --git a/typeshed/2and3/networkx/algorithms/tree/__init__.pyi b/typeshed/networkx/algorithms/tree/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/tree/__init__.pyi
rename to typeshed/networkx/algorithms/tree/__init__.pyi
diff --git a/typeshed/2and3/networkx/algorithms/tree/branchings.pyi b/typeshed/networkx/algorithms/tree/branchings.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/tree/branchings.pyi
rename to typeshed/networkx/algorithms/tree/branchings.pyi
diff --git a/typeshed/2and3/networkx/algorithms/tree/coding.pyi b/typeshed/networkx/algorithms/tree/coding.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/tree/coding.pyi
rename to typeshed/networkx/algorithms/tree/coding.pyi
diff --git a/typeshed/2and3/networkx/algorithms/tree/mst.pyi b/typeshed/networkx/algorithms/tree/mst.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/tree/mst.pyi
rename to typeshed/networkx/algorithms/tree/mst.pyi
diff --git a/typeshed/2and3/networkx/algorithms/tree/operations.pyi b/typeshed/networkx/algorithms/tree/operations.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/tree/operations.pyi
rename to typeshed/networkx/algorithms/tree/operations.pyi
diff --git a/typeshed/2and3/networkx/algorithms/tree/recognition.pyi b/typeshed/networkx/algorithms/tree/recognition.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/tree/recognition.pyi
rename to typeshed/networkx/algorithms/tree/recognition.pyi
diff --git a/typeshed/2and3/networkx/algorithms/triads.pyi b/typeshed/networkx/algorithms/triads.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/triads.pyi
rename to typeshed/networkx/algorithms/triads.pyi
diff --git a/typeshed/2and3/networkx/algorithms/vitality.pyi b/typeshed/networkx/algorithms/vitality.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/vitality.pyi
rename to typeshed/networkx/algorithms/vitality.pyi
diff --git a/typeshed/2and3/networkx/algorithms/voronoi.pyi b/typeshed/networkx/algorithms/voronoi.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/voronoi.pyi
rename to typeshed/networkx/algorithms/voronoi.pyi
diff --git a/typeshed/2and3/networkx/algorithms/wiener.pyi b/typeshed/networkx/algorithms/wiener.pyi
similarity index 100%
rename from typeshed/2and3/networkx/algorithms/wiener.pyi
rename to typeshed/networkx/algorithms/wiener.pyi
diff --git a/typeshed/2and3/networkx/classes/__init__.pyi b/typeshed/networkx/classes/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/__init__.pyi
rename to typeshed/networkx/classes/__init__.pyi
diff --git a/typeshed/2and3/networkx/classes/coreviews.pyi b/typeshed/networkx/classes/coreviews.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/coreviews.pyi
rename to typeshed/networkx/classes/coreviews.pyi
diff --git a/typeshed/2and3/networkx/classes/digraph.pyi b/typeshed/networkx/classes/digraph.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/digraph.pyi
rename to typeshed/networkx/classes/digraph.pyi
diff --git a/typeshed/2and3/networkx/classes/filters.pyi b/typeshed/networkx/classes/filters.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/filters.pyi
rename to typeshed/networkx/classes/filters.pyi
diff --git a/typeshed/2and3/networkx/classes/function.pyi b/typeshed/networkx/classes/function.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/function.pyi
rename to typeshed/networkx/classes/function.pyi
diff --git a/typeshed/2and3/networkx/classes/graph.pyi b/typeshed/networkx/classes/graph.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/graph.pyi
rename to typeshed/networkx/classes/graph.pyi
diff --git a/typeshed/2and3/networkx/classes/graphviews.pyi b/typeshed/networkx/classes/graphviews.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/graphviews.pyi
rename to typeshed/networkx/classes/graphviews.pyi
diff --git a/typeshed/2and3/networkx/classes/multidigraph.pyi b/typeshed/networkx/classes/multidigraph.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/multidigraph.pyi
rename to typeshed/networkx/classes/multidigraph.pyi
diff --git a/typeshed/2and3/networkx/classes/multigraph.pyi b/typeshed/networkx/classes/multigraph.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/multigraph.pyi
rename to typeshed/networkx/classes/multigraph.pyi
diff --git a/typeshed/2and3/networkx/classes/ordered.pyi b/typeshed/networkx/classes/ordered.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/ordered.pyi
rename to typeshed/networkx/classes/ordered.pyi
diff --git a/typeshed/2and3/networkx/classes/reportviews.pyi b/typeshed/networkx/classes/reportviews.pyi
similarity index 100%
rename from typeshed/2and3/networkx/classes/reportviews.pyi
rename to typeshed/networkx/classes/reportviews.pyi
diff --git a/typeshed/2and3/networkx/convert.pyi b/typeshed/networkx/convert.pyi
similarity index 100%
rename from typeshed/2and3/networkx/convert.pyi
rename to typeshed/networkx/convert.pyi
diff --git a/typeshed/2and3/networkx/convert_matrix.pyi b/typeshed/networkx/convert_matrix.pyi
similarity index 100%
rename from typeshed/2and3/networkx/convert_matrix.pyi
rename to typeshed/networkx/convert_matrix.pyi
diff --git a/typeshed/2and3/networkx/drawing/__init__.pyi b/typeshed/networkx/drawing/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/drawing/__init__.pyi
rename to typeshed/networkx/drawing/__init__.pyi
diff --git a/typeshed/2and3/networkx/drawing/layout.pyi b/typeshed/networkx/drawing/layout.pyi
similarity index 100%
rename from typeshed/2and3/networkx/drawing/layout.pyi
rename to typeshed/networkx/drawing/layout.pyi
diff --git a/typeshed/2and3/networkx/drawing/nx_agraph.pyi b/typeshed/networkx/drawing/nx_agraph.pyi
similarity index 100%
rename from typeshed/2and3/networkx/drawing/nx_agraph.pyi
rename to typeshed/networkx/drawing/nx_agraph.pyi
diff --git a/typeshed/2and3/networkx/drawing/nx_pydot.pyi b/typeshed/networkx/drawing/nx_pydot.pyi
similarity index 100%
rename from typeshed/2and3/networkx/drawing/nx_pydot.pyi
rename to typeshed/networkx/drawing/nx_pydot.pyi
diff --git a/typeshed/2and3/networkx/drawing/nx_pylab.pyi b/typeshed/networkx/drawing/nx_pylab.pyi
similarity index 100%
rename from typeshed/2and3/networkx/drawing/nx_pylab.pyi
rename to typeshed/networkx/drawing/nx_pylab.pyi
diff --git a/typeshed/2and3/networkx/exception.pyi b/typeshed/networkx/exception.pyi
similarity index 100%
rename from typeshed/2and3/networkx/exception.pyi
rename to typeshed/networkx/exception.pyi
diff --git a/typeshed/2and3/networkx/generators/atlas.pyi b/typeshed/networkx/generators/atlas.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/atlas.pyi
rename to typeshed/networkx/generators/atlas.pyi
diff --git a/typeshed/2and3/networkx/generators/classic.pyi b/typeshed/networkx/generators/classic.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/classic.pyi
rename to typeshed/networkx/generators/classic.pyi
diff --git a/typeshed/2and3/networkx/generators/community.pyi b/typeshed/networkx/generators/community.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/community.pyi
rename to typeshed/networkx/generators/community.pyi
diff --git a/typeshed/2and3/networkx/generators/degree_seq.pyi b/typeshed/networkx/generators/degree_seq.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/degree_seq.pyi
rename to typeshed/networkx/generators/degree_seq.pyi
diff --git a/typeshed/2and3/networkx/generators/directed.pyi b/typeshed/networkx/generators/directed.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/directed.pyi
rename to typeshed/networkx/generators/directed.pyi
diff --git a/typeshed/2and3/networkx/generators/duplication.pyi b/typeshed/networkx/generators/duplication.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/duplication.pyi
rename to typeshed/networkx/generators/duplication.pyi
diff --git a/typeshed/2and3/networkx/generators/ego.pyi b/typeshed/networkx/generators/ego.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/ego.pyi
rename to typeshed/networkx/generators/ego.pyi
diff --git a/typeshed/2and3/networkx/generators/expanders.pyi b/typeshed/networkx/generators/expanders.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/expanders.pyi
rename to typeshed/networkx/generators/expanders.pyi
diff --git a/typeshed/2and3/networkx/generators/geometric.pyi b/typeshed/networkx/generators/geometric.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/geometric.pyi
rename to typeshed/networkx/generators/geometric.pyi
diff --git a/typeshed/2and3/networkx/generators/intersection.pyi b/typeshed/networkx/generators/intersection.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/intersection.pyi
rename to typeshed/networkx/generators/intersection.pyi
diff --git a/typeshed/2and3/networkx/generators/joint_degree_seq.pyi b/typeshed/networkx/generators/joint_degree_seq.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/joint_degree_seq.pyi
rename to typeshed/networkx/generators/joint_degree_seq.pyi
diff --git a/typeshed/2and3/networkx/generators/lattice.pyi b/typeshed/networkx/generators/lattice.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/lattice.pyi
rename to typeshed/networkx/generators/lattice.pyi
diff --git a/typeshed/2and3/networkx/generators/line.pyi b/typeshed/networkx/generators/line.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/line.pyi
rename to typeshed/networkx/generators/line.pyi
diff --git a/typeshed/2and3/networkx/generators/mycielski.pyi b/typeshed/networkx/generators/mycielski.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/mycielski.pyi
rename to typeshed/networkx/generators/mycielski.pyi
diff --git a/typeshed/2and3/networkx/generators/nonisomorphic_trees.pyi b/typeshed/networkx/generators/nonisomorphic_trees.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/nonisomorphic_trees.pyi
rename to typeshed/networkx/generators/nonisomorphic_trees.pyi
diff --git a/typeshed/2and3/networkx/generators/random_clustered.pyi b/typeshed/networkx/generators/random_clustered.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/random_clustered.pyi
rename to typeshed/networkx/generators/random_clustered.pyi
diff --git a/typeshed/2and3/networkx/generators/random_graphs.pyi b/typeshed/networkx/generators/random_graphs.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/random_graphs.pyi
rename to typeshed/networkx/generators/random_graphs.pyi
diff --git a/typeshed/2and3/networkx/generators/small.pyi b/typeshed/networkx/generators/small.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/small.pyi
rename to typeshed/networkx/generators/small.pyi
diff --git a/typeshed/2and3/networkx/generators/social.pyi b/typeshed/networkx/generators/social.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/social.pyi
rename to typeshed/networkx/generators/social.pyi
diff --git a/typeshed/2and3/networkx/generators/stochastic.pyi b/typeshed/networkx/generators/stochastic.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/stochastic.pyi
rename to typeshed/networkx/generators/stochastic.pyi
diff --git a/typeshed/2and3/networkx/generators/trees.pyi b/typeshed/networkx/generators/trees.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/trees.pyi
rename to typeshed/networkx/generators/trees.pyi
diff --git a/typeshed/2and3/networkx/generators/triads.pyi b/typeshed/networkx/generators/triads.pyi
similarity index 100%
rename from typeshed/2and3/networkx/generators/triads.pyi
rename to typeshed/networkx/generators/triads.pyi
diff --git a/typeshed/2and3/networkx/linalg/__init__.pyi b/typeshed/networkx/linalg/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/linalg/__init__.pyi
rename to typeshed/networkx/linalg/__init__.pyi
diff --git a/typeshed/2and3/networkx/linalg/algebraicconnectivity.pyi b/typeshed/networkx/linalg/algebraicconnectivity.pyi
similarity index 100%
rename from typeshed/2and3/networkx/linalg/algebraicconnectivity.pyi
rename to typeshed/networkx/linalg/algebraicconnectivity.pyi
diff --git a/typeshed/2and3/networkx/linalg/attrmatrix.pyi b/typeshed/networkx/linalg/attrmatrix.pyi
similarity index 100%
rename from typeshed/2and3/networkx/linalg/attrmatrix.pyi
rename to typeshed/networkx/linalg/attrmatrix.pyi
diff --git a/typeshed/2and3/networkx/linalg/graphmatrix.pyi b/typeshed/networkx/linalg/graphmatrix.pyi
similarity index 100%
rename from typeshed/2and3/networkx/linalg/graphmatrix.pyi
rename to typeshed/networkx/linalg/graphmatrix.pyi
diff --git a/typeshed/2and3/networkx/linalg/laplacianmatrix.pyi b/typeshed/networkx/linalg/laplacianmatrix.pyi
similarity index 100%
rename from typeshed/2and3/networkx/linalg/laplacianmatrix.pyi
rename to typeshed/networkx/linalg/laplacianmatrix.pyi
diff --git a/typeshed/2and3/networkx/linalg/modularitymatrix.pyi b/typeshed/networkx/linalg/modularitymatrix.pyi
similarity index 100%
rename from typeshed/2and3/networkx/linalg/modularitymatrix.pyi
rename to typeshed/networkx/linalg/modularitymatrix.pyi
diff --git a/typeshed/2and3/networkx/linalg/spectrum.pyi b/typeshed/networkx/linalg/spectrum.pyi
similarity index 100%
rename from typeshed/2and3/networkx/linalg/spectrum.pyi
rename to typeshed/networkx/linalg/spectrum.pyi
diff --git a/typeshed/2and3/networkx/readwrite/__init__.pyi b/typeshed/networkx/readwrite/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/__init__.pyi
rename to typeshed/networkx/readwrite/__init__.pyi
diff --git a/typeshed/2and3/networkx/readwrite/adjlist.pyi b/typeshed/networkx/readwrite/adjlist.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/adjlist.pyi
rename to typeshed/networkx/readwrite/adjlist.pyi
diff --git a/typeshed/2and3/networkx/readwrite/edgelist.pyi b/typeshed/networkx/readwrite/edgelist.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/edgelist.pyi
rename to typeshed/networkx/readwrite/edgelist.pyi
diff --git a/typeshed/2and3/networkx/readwrite/gexf.pyi b/typeshed/networkx/readwrite/gexf.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/gexf.pyi
rename to typeshed/networkx/readwrite/gexf.pyi
diff --git a/typeshed/2and3/networkx/readwrite/gml.pyi b/typeshed/networkx/readwrite/gml.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/gml.pyi
rename to typeshed/networkx/readwrite/gml.pyi
diff --git a/typeshed/2and3/networkx/readwrite/gpickle.pyi b/typeshed/networkx/readwrite/gpickle.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/gpickle.pyi
rename to typeshed/networkx/readwrite/gpickle.pyi
diff --git a/typeshed/2and3/networkx/readwrite/graph6.pyi b/typeshed/networkx/readwrite/graph6.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/graph6.pyi
rename to typeshed/networkx/readwrite/graph6.pyi
diff --git a/typeshed/2and3/networkx/readwrite/graphml.pyi b/typeshed/networkx/readwrite/graphml.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/graphml.pyi
rename to typeshed/networkx/readwrite/graphml.pyi
diff --git a/typeshed/2and3/networkx/readwrite/json_graph/__init__.pyi b/typeshed/networkx/readwrite/json_graph/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/json_graph/__init__.pyi
rename to typeshed/networkx/readwrite/json_graph/__init__.pyi
diff --git a/typeshed/2and3/networkx/readwrite/json_graph/adjacency.pyi b/typeshed/networkx/readwrite/json_graph/adjacency.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/json_graph/adjacency.pyi
rename to typeshed/networkx/readwrite/json_graph/adjacency.pyi
diff --git a/typeshed/2and3/networkx/readwrite/json_graph/cytoscape.pyi b/typeshed/networkx/readwrite/json_graph/cytoscape.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/json_graph/cytoscape.pyi
rename to typeshed/networkx/readwrite/json_graph/cytoscape.pyi
diff --git a/typeshed/2and3/networkx/readwrite/json_graph/jit.pyi b/typeshed/networkx/readwrite/json_graph/jit.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/json_graph/jit.pyi
rename to typeshed/networkx/readwrite/json_graph/jit.pyi
diff --git a/typeshed/2and3/networkx/readwrite/json_graph/node_link.pyi b/typeshed/networkx/readwrite/json_graph/node_link.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/json_graph/node_link.pyi
rename to typeshed/networkx/readwrite/json_graph/node_link.pyi
diff --git a/typeshed/2and3/networkx/readwrite/json_graph/tree.pyi b/typeshed/networkx/readwrite/json_graph/tree.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/json_graph/tree.pyi
rename to typeshed/networkx/readwrite/json_graph/tree.pyi
diff --git a/typeshed/2and3/networkx/readwrite/leda.pyi b/typeshed/networkx/readwrite/leda.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/leda.pyi
rename to typeshed/networkx/readwrite/leda.pyi
diff --git a/typeshed/2and3/networkx/readwrite/multiline_adjlist.pyi b/typeshed/networkx/readwrite/multiline_adjlist.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/multiline_adjlist.pyi
rename to typeshed/networkx/readwrite/multiline_adjlist.pyi
diff --git a/typeshed/2and3/networkx/readwrite/nx_shp.pyi b/typeshed/networkx/readwrite/nx_shp.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/nx_shp.pyi
rename to typeshed/networkx/readwrite/nx_shp.pyi
diff --git a/typeshed/2and3/networkx/readwrite/nx_yaml.pyi b/typeshed/networkx/readwrite/nx_yaml.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/nx_yaml.pyi
rename to typeshed/networkx/readwrite/nx_yaml.pyi
diff --git a/typeshed/2and3/networkx/readwrite/p2g.pyi b/typeshed/networkx/readwrite/p2g.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/p2g.pyi
rename to typeshed/networkx/readwrite/p2g.pyi
diff --git a/typeshed/2and3/networkx/readwrite/pajek.pyi b/typeshed/networkx/readwrite/pajek.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/pajek.pyi
rename to typeshed/networkx/readwrite/pajek.pyi
diff --git a/typeshed/2and3/networkx/readwrite/sparse6.pyi b/typeshed/networkx/readwrite/sparse6.pyi
similarity index 100%
rename from typeshed/2and3/networkx/readwrite/sparse6.pyi
rename to typeshed/networkx/readwrite/sparse6.pyi
diff --git a/typeshed/2and3/networkx/relabel.pyi b/typeshed/networkx/relabel.pyi
similarity index 100%
rename from typeshed/2and3/networkx/relabel.pyi
rename to typeshed/networkx/relabel.pyi
diff --git a/typeshed/2and3/networkx/release.pyi b/typeshed/networkx/release.pyi
similarity index 100%
rename from typeshed/2and3/networkx/release.pyi
rename to typeshed/networkx/release.pyi
diff --git a/typeshed/2and3/networkx/tests/__init__.pyi b/typeshed/networkx/tests/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/tests/__init__.pyi
rename to typeshed/networkx/tests/__init__.pyi
diff --git a/typeshed/2and3/networkx/tests/test.pyi b/typeshed/networkx/tests/test.pyi
similarity index 100%
rename from typeshed/2and3/networkx/tests/test.pyi
rename to typeshed/networkx/tests/test.pyi
diff --git a/typeshed/2and3/networkx/utils/__init__.pyi b/typeshed/networkx/utils/__init__.pyi
similarity index 100%
rename from typeshed/2and3/networkx/utils/__init__.pyi
rename to typeshed/networkx/utils/__init__.pyi
diff --git a/typeshed/2and3/networkx/utils/contextmanagers.pyi b/typeshed/networkx/utils/contextmanagers.pyi
similarity index 100%
rename from typeshed/2and3/networkx/utils/contextmanagers.pyi
rename to typeshed/networkx/utils/contextmanagers.pyi
diff --git a/typeshed/2and3/networkx/utils/decorators.pyi b/typeshed/networkx/utils/decorators.pyi
similarity index 100%
rename from typeshed/2and3/networkx/utils/decorators.pyi
rename to typeshed/networkx/utils/decorators.pyi
diff --git a/typeshed/2and3/networkx/utils/heaps.pyi b/typeshed/networkx/utils/heaps.pyi
similarity index 100%
rename from typeshed/2and3/networkx/utils/heaps.pyi
rename to typeshed/networkx/utils/heaps.pyi
diff --git a/typeshed/2and3/networkx/utils/misc.pyi b/typeshed/networkx/utils/misc.pyi
similarity index 100%
rename from typeshed/2and3/networkx/utils/misc.pyi
rename to typeshed/networkx/utils/misc.pyi
diff --git a/typeshed/2and3/networkx/utils/random_sequence.pyi b/typeshed/networkx/utils/random_sequence.pyi
similarity index 100%
rename from typeshed/2and3/networkx/utils/random_sequence.pyi
rename to typeshed/networkx/utils/random_sequence.pyi
diff --git a/typeshed/2and3/networkx/utils/rcm.pyi b/typeshed/networkx/utils/rcm.pyi
similarity index 100%
rename from typeshed/2and3/networkx/utils/rcm.pyi
rename to typeshed/networkx/utils/rcm.pyi
diff --git a/typeshed/2and3/networkx/utils/union_find.pyi b/typeshed/networkx/utils/union_find.pyi
similarity index 100%
rename from typeshed/2and3/networkx/utils/union_find.pyi
rename to typeshed/networkx/utils/union_find.pyi
diff --git a/typeshed/2and3/networkx/version.pyi b/typeshed/networkx/version.pyi
similarity index 100%
rename from typeshed/2and3/networkx/version.pyi
rename to typeshed/networkx/version.pyi
diff --git a/typeshed/2and3/prov/__init__.pyi b/typeshed/prov/__init__.pyi
similarity index 100%
rename from typeshed/2and3/prov/__init__.pyi
rename to typeshed/prov/__init__.pyi
diff --git a/typeshed/2and3/prov/constants.pyi b/typeshed/prov/constants.pyi
similarity index 100%
rename from typeshed/2and3/prov/constants.pyi
rename to typeshed/prov/constants.pyi
diff --git a/typeshed/2and3/prov/graph.pyi b/typeshed/prov/graph.pyi
similarity index 100%
rename from typeshed/2and3/prov/graph.pyi
rename to typeshed/prov/graph.pyi
diff --git a/typeshed/2and3/prov/identifier.pyi b/typeshed/prov/identifier.pyi
similarity index 100%
rename from typeshed/2and3/prov/identifier.pyi
rename to typeshed/prov/identifier.pyi
diff --git a/typeshed/2and3/prov/model.pyi b/typeshed/prov/model.pyi
similarity index 100%
rename from typeshed/2and3/prov/model.pyi
rename to typeshed/prov/model.pyi
diff --git a/typeshed/2and3/prov/serializers/__init__.pyi b/typeshed/prov/serializers/__init__.pyi
similarity index 100%
rename from typeshed/2and3/prov/serializers/__init__.pyi
rename to typeshed/prov/serializers/__init__.pyi
diff --git a/typeshed/2and3/prov/serializers/provjson.pyi b/typeshed/prov/serializers/provjson.pyi
similarity index 100%
rename from typeshed/2and3/prov/serializers/provjson.pyi
rename to typeshed/prov/serializers/provjson.pyi
diff --git a/typeshed/2and3/prov/serializers/provn.pyi b/typeshed/prov/serializers/provn.pyi
similarity index 100%
rename from typeshed/2and3/prov/serializers/provn.pyi
rename to typeshed/prov/serializers/provn.pyi
diff --git a/typeshed/2and3/prov/serializers/provrdf.pyi b/typeshed/prov/serializers/provrdf.pyi
similarity index 100%
rename from typeshed/2and3/prov/serializers/provrdf.pyi
rename to typeshed/prov/serializers/provrdf.pyi
diff --git a/typeshed/2and3/prov/serializers/provxml.pyi b/typeshed/prov/serializers/provxml.pyi
similarity index 100%
rename from typeshed/2and3/prov/serializers/provxml.pyi
rename to typeshed/prov/serializers/provxml.pyi
diff --git a/typeshed/2and3/psutil.pyi b/typeshed/psutil.pyi
similarity index 100%
rename from typeshed/2and3/psutil.pyi
rename to typeshed/psutil.pyi
diff --git a/typeshed/2and3/rdflib/__init__.pyi b/typeshed/rdflib/__init__.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/__init__.pyi
rename to typeshed/rdflib/__init__.pyi
diff --git a/typeshed/2and3/rdflib/events.pyi b/typeshed/rdflib/events.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/events.pyi
rename to typeshed/rdflib/events.pyi
diff --git a/typeshed/2and3/rdflib/exceptions.pyi b/typeshed/rdflib/exceptions.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/exceptions.pyi
rename to typeshed/rdflib/exceptions.pyi
diff --git a/typeshed/2and3/rdflib/graph.pyi b/typeshed/rdflib/graph.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/graph.pyi
rename to typeshed/rdflib/graph.pyi
diff --git a/typeshed/2and3/rdflib/namespace.pyi b/typeshed/rdflib/namespace.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/namespace.pyi
rename to typeshed/rdflib/namespace.pyi
diff --git a/typeshed/2and3/rdflib/parser.pyi b/typeshed/rdflib/parser.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/parser.pyi
rename to typeshed/rdflib/parser.pyi
diff --git a/typeshed/2and3/rdflib/plugin.pyi b/typeshed/rdflib/plugin.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/plugin.pyi
rename to typeshed/rdflib/plugin.pyi
diff --git a/typeshed/2and3/rdflib/plugins/__init__.pyi b/typeshed/rdflib/plugins/__init__.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/plugins/__init__.pyi
rename to typeshed/rdflib/plugins/__init__.pyi
diff --git a/typeshed/2and3/rdflib/plugins/parsers/__init__.pyi b/typeshed/rdflib/plugins/parsers/__init__.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/plugins/parsers/__init__.pyi
rename to typeshed/rdflib/plugins/parsers/__init__.pyi
diff --git a/typeshed/2and3/rdflib/plugins/parsers/notation3.pyi b/typeshed/rdflib/plugins/parsers/notation3.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/plugins/parsers/notation3.pyi
rename to typeshed/rdflib/plugins/parsers/notation3.pyi
diff --git a/typeshed/2and3/rdflib/query.pyi b/typeshed/rdflib/query.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/query.pyi
rename to typeshed/rdflib/query.pyi
diff --git a/typeshed/2and3/rdflib/serializer.pyi b/typeshed/rdflib/serializer.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/serializer.pyi
rename to typeshed/rdflib/serializer.pyi
diff --git a/typeshed/2and3/rdflib/store.pyi b/typeshed/rdflib/store.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/store.pyi
rename to typeshed/rdflib/store.pyi
diff --git a/typeshed/2and3/rdflib/term.pyi b/typeshed/rdflib/term.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/term.pyi
rename to typeshed/rdflib/term.pyi
diff --git a/typeshed/2and3/rdflib/util.pyi b/typeshed/rdflib/util.pyi
similarity index 100%
rename from typeshed/2and3/rdflib/util.pyi
rename to typeshed/rdflib/util.pyi
diff --git a/typeshed/2and3/ruamel/__init__.py b/typeshed/ruamel/__init__.py
similarity index 100%
rename from typeshed/2and3/ruamel/__init__.py
rename to typeshed/ruamel/__init__.py
diff --git a/typeshed/2and3/shellescape/__init__.pyi b/typeshed/shellescape/__init__.pyi
similarity index 100%
rename from typeshed/2and3/shellescape/__init__.pyi
rename to typeshed/shellescape/__init__.pyi
diff --git a/typeshed/2and3/shellescape/main.pyi b/typeshed/shellescape/main.pyi
similarity index 100%
rename from typeshed/2and3/shellescape/main.pyi
rename to typeshed/shellescape/main.pyi
diff --git a/typeshed/subprocess.pyi b/typeshed/subprocess.pyi
new file mode 100644
index 0000000000..5893763180
--- /dev/null
+++ b/typeshed/subprocess.pyi
@@ -0,0 +1,1053 @@
+import sys
+from _typeshed import AnyPath
+from types import TracebackType
+from typing import IO, Any, AnyStr, Callable, Generic, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union, overload
+from typing_extensions import Literal
+
+if sys.version_info >= (3, 9):
+ from types import GenericAlias
+
+# We prefer to annotate inputs to methods (e.g. subprocess.check_call) with these
+# union types.
+# For outputs we use laborious literal-based overloads to try to determine
+# which specific return types to use, and prefer to fall back to Any when
+# this does not work, so the caller does not have to use an assertion to confirm
+# which type they have.
+#
+# For example:
+#
+# try:
+# x = subprocess.check_output(["ls", "-l"])
+# reveal_type(x) # bytes, based on the overloads
+# except TimeoutError as e:
+# reveal_type(e.cmd) # Any, but morally is _CMD
+_FILE = Union[None, int, IO[Any]]
+_TXT = Union[bytes, str]
+# Python 3.6 doesn't support _CMD being a single PathLike.
+# See: https://bugs.python.org/issue31961
+_CMD = Union[_TXT, Sequence[AnyPath]]
+_ENV = Union[Mapping[bytes, _TXT], Mapping[str, _TXT]]
+
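+# NOTE(editor): illustrative examples, not part of the original stub text —
+# the aliases above cover the common argument shapes, e.g.
+#
+#   subprocess.run("ls -l", shell=True)                   # _CMD as a str
+#   subprocess.run(["ls", "-l"])                          # _CMD as a Sequence
+#   subprocess.run(["ls"], stdout=subprocess.PIPE)        # _FILE as an int constant
+#   subprocess.run(["ls"], stdout=open("out.txt", "wb"))  # _FILE as an IO[Any]
+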
+_S = TypeVar("_S")
+_T = TypeVar("_T")
+
+class CompletedProcess(Generic[_T]):
+ # morally: _CMD
+ args: Any
+ returncode: Optional[int] # this optional is REQUIRED for mypyc
+ # These are really both Optional, but requiring checks would be tedious
+ # and writing all the overloads would be horrific.
+ stdout: _T
+ stderr: _T
+ def __init__(self, args: _CMD, returncode: int, stdout: Optional[_T] = ..., stderr: Optional[_T] = ...) -> None: ...
+ def check_returncode(self) -> None: ...
+ if sys.version_info >= (3, 9):
+ def __class_getitem__(cls, item: Any) -> GenericAlias: ...
+
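+# NOTE(editor): a sketch of how the Generic parameter is used, assuming the
+# run() overloads below; _T is the type of stdout/stderr:
+#
+#   r = subprocess.run(["ls"], capture_output=True)  # Python >= 3.7
+#   reveal_type(r)         # CompletedProcess[bytes]
+#   reveal_type(r.stdout)  # bytes
+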
+if sys.version_info >= (3, 7):
+ # Nearly the same args as for 3.6, except for capture_output and text
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ capture_output: bool = ...,
+ check: bool = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ input: Optional[str] = ...,
+ text: Literal[True],
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[str]: ...
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ capture_output: bool = ...,
+ check: bool = ...,
+ encoding: str,
+ errors: Optional[str] = ...,
+ input: Optional[str] = ...,
+ text: Optional[bool] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[str]: ...
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ capture_output: bool = ...,
+ check: bool = ...,
+ encoding: Optional[str] = ...,
+ errors: str,
+ input: Optional[str] = ...,
+ text: Optional[bool] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[str]: ...
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ *,
+ universal_newlines: Literal[True],
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ # where the *real* keyword only args start
+ capture_output: bool = ...,
+ check: bool = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ input: Optional[str] = ...,
+ text: Optional[bool] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[str]: ...
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: Literal[False] = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ capture_output: bool = ...,
+ check: bool = ...,
+ encoding: None = ...,
+ errors: None = ...,
+ input: Optional[bytes] = ...,
+ text: Literal[None, False] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[bytes]: ...
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ capture_output: bool = ...,
+ check: bool = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ input: Optional[_TXT] = ...,
+ text: Optional[bool] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[Any]: ...
+
+else:
+    # Nearly the same args as Popen.__init__ except for timeout, input, and check
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ check: bool = ...,
+ encoding: str,
+ errors: Optional[str] = ...,
+ input: Optional[str] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[str]: ...
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ check: bool = ...,
+ encoding: Optional[str] = ...,
+ errors: str,
+ input: Optional[str] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[str]: ...
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ *,
+ universal_newlines: Literal[True],
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ # where the *real* keyword only args start
+ check: bool = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ input: Optional[str] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[str]: ...
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: Literal[False] = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ check: bool = ...,
+ encoding: None = ...,
+ errors: None = ...,
+ input: Optional[bytes] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[bytes]: ...
+ @overload
+ def run(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ check: bool = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ input: Optional[_TXT] = ...,
+ timeout: Optional[float] = ...,
+ ) -> CompletedProcess[Any]: ...
+
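+# NOTE(editor): illustrative overload resolution for run(), in the
+# reveal_type style of the comment at the top of this file:
+#
+#   reveal_type(subprocess.run(["ls"]))                    # CompletedProcess[bytes]
+#   reveal_type(subprocess.run(["ls"], encoding="utf-8"))  # CompletedProcess[str]
+#   reveal_type(subprocess.run(["ls"], text=True))         # CompletedProcess[str] (3.7+ only)
+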
+# Same args as Popen.__init__
+def call(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+) -> int: ...
+
+# Same args as Popen.__init__
+def check_call(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stdout: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+) -> int: ...
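+# A hedged usage sketch (comments only): call() returns the exit status,
+# while check_call() raises CalledProcessError on a nonzero status:
+#     status = call(["ls", "-l"])   # -> int
+#     check_call(["ls", "-l"])      # -> 0 on success, else CalledProcessError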
+
+if sys.version_info >= (3, 7):
+ # 3.7 added text
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ text: Literal[True],
+ ) -> str: ...
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: str,
+ errors: Optional[str] = ...,
+ text: Optional[bool] = ...,
+ ) -> str: ...
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: Optional[str] = ...,
+ errors: str,
+ text: Optional[bool] = ...,
+ ) -> str: ...
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ *,
+ universal_newlines: Literal[True],
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ # where the real keyword only ones start
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ text: Optional[bool] = ...,
+ ) -> str: ...
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: Literal[False] = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: None = ...,
+ errors: None = ...,
+ text: Literal[None, False] = ...,
+ ) -> bytes: ...
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ text: Optional[bool] = ...,
+ ) -> Any: ... # morally: -> _TXT
+
+else:
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: str,
+ errors: Optional[str] = ...,
+ ) -> str: ...
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: Optional[str] = ...,
+ errors: str,
+ ) -> str: ...
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ universal_newlines: Literal[True],
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ ) -> str: ...
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: Literal[False] = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: None = ...,
+ errors: None = ...,
+ ) -> bytes: ...
+ @overload
+ def check_output(
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: _FILE = ...,
+ stderr: _FILE = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Any = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ timeout: Optional[float] = ...,
+ input: _TXT = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ ) -> Any: ... # morally: -> _TXT
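+ # A hedged sketch of how the check_output() overloads resolve (comments
+ # only); the same pattern holds on either side of the version check:
+ #     check_output(["uname"])                           -> bytes
+ #     check_output(["uname"], universal_newlines=True)  -> str
+ #     check_output(["uname"], encoding="utf-8")         -> str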
+
+PIPE: int
+STDOUT: int
+DEVNULL: int
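+# These constants are the special values accepted by the stdin/stdout/stderr
+# parameters above, e.g. (a hedged sketch):
+#     run(["ls"], stdout=PIPE, stderr=STDOUT)  # merge stderr into stdout
+#     run(["ls"], stdout=DEVNULL)              # discard output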
+
+class SubprocessError(Exception): ...
+
+class TimeoutExpired(SubprocessError):
+ def __init__(self, cmd: _CMD, timeout: float, output: Optional[_TXT] = ..., stderr: Optional[_TXT] = ...) -> None: ...
+ # morally: _CMD
+ cmd: Any
+ timeout: float
+ # morally: Optional[_TXT]
+ output: Any
+ stdout: Any
+ stderr: Any
+
+class CalledProcessError(SubprocessError):
+ returncode: int
+ # morally: _CMD
+ cmd: Any
+ # morally: Optional[_TXT]
+ output: Any
+
+ # morally: Optional[_TXT]
+ stdout: Any
+ stderr: Any
+ def __init__(self, returncode: int, cmd: _CMD, output: Optional[_TXT] = ..., stderr: Optional[_TXT] = ...) -> None: ...
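+# A hedged sketch of the usual error-handling pattern (comments only;
+# "some-tool" is a hypothetical command):
+#     try:
+#         check_output(["some-tool"], timeout=5)
+#     except CalledProcessError as err:
+#         print(err.returncode, err.cmd, err.output)
+#     except TimeoutExpired as err:
+#         print(err.timeout, err.stdout)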
+
+class Popen(Generic[AnyStr]):
+ args: _CMD
+ stdin: Optional[IO[AnyStr]]
+ stdout: Optional[IO[AnyStr]]
+ stderr: Optional[IO[AnyStr]]
+ pid: int
+ returncode: Optional[int] # this is REQUIRED for mypyc
+ universal_newlines: bool
+
+ # Technically it is wrong that Popen provides __new__ instead of __init__,
+ # but hopefully this shouldn't come up in practice.
+
+ if sys.version_info >= (3, 7):
+ # text is added in 3.7
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ text: Optional[bool] = ...,
+ encoding: str,
+ errors: Optional[str] = ...,
+ ) -> Popen[str]: ...
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ text: Optional[bool] = ...,
+ encoding: Optional[str] = ...,
+ errors: str,
+ ) -> Popen[str]: ...
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ *,
+ universal_newlines: Literal[True],
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ # where the *real* keyword only args start
+ text: Optional[bool] = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ ) -> Popen[str]: ...
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ text: Literal[True],
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ ) -> Popen[str]: ...
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: Literal[False] = ...,
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ text: Literal[None, False] = ...,
+ encoding: None = ...,
+ errors: None = ...,
+ ) -> Popen[bytes]: ...
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ text: Optional[bool] = ...,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ ) -> Popen[Any]: ...
+ else:
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ encoding: str,
+ errors: Optional[str] = ...,
+ ) -> Popen[str]: ...
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ encoding: Optional[str] = ...,
+ errors: str,
+ ) -> Popen[str]: ...
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ *,
+ universal_newlines: Literal[True],
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ # where the *real* keyword only args start
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ ) -> Popen[str]: ...
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: Literal[False] = ...,
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ encoding: None = ...,
+ errors: None = ...,
+ ) -> Popen[bytes]: ...
+ @overload
+ def __new__(
+ cls,
+ args: _CMD,
+ bufsize: int = ...,
+ executable: Optional[AnyPath] = ...,
+ stdin: Optional[_FILE] = ...,
+ stdout: Optional[_FILE] = ...,
+ stderr: Optional[_FILE] = ...,
+ preexec_fn: Optional[Callable[[], Any]] = ...,
+ close_fds: bool = ...,
+ shell: bool = ...,
+ cwd: Optional[AnyPath] = ...,
+ env: Optional[_ENV] = ...,
+ universal_newlines: bool = ...,
+ startupinfo: Optional[Any] = ...,
+ creationflags: int = ...,
+ restore_signals: bool = ...,
+ start_new_session: bool = ...,
+ pass_fds: Any = ...,
+ *,
+ encoding: Optional[str] = ...,
+ errors: Optional[str] = ...,
+ ) -> Popen[Any]: ...
+ def poll(self) -> Optional[int]: ...
+ if sys.version_info >= (3, 7):
+ def wait(self, timeout: Optional[float] = ...) -> int: ...
+ else:
+ def wait(self, timeout: Optional[float] = ..., endtime: Optional[float] = ...) -> int: ...
+ # Return str/bytes
+ def communicate(
+ self,
+ input: Optional[AnyStr] = ...,
+ timeout: Optional[float] = ...,
+ ) -> Tuple[Optional[AnyStr], AnyStr]: ... # this optional is REQUIRED for mypyc
+ def send_signal(self, sig: int) -> None: ...
+ def terminate(self) -> None: ...
+ def kill(self) -> None: ...
+ def __enter__(self: _S) -> _S: ...
+ def __exit__(
+ self, type: Optional[Type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType]
+ ) -> None: ...
+ if sys.version_info >= (3, 9):
+ def __class_getitem__(cls, item: Any) -> GenericAlias: ...
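+ # A hedged usage sketch (comments only): the __new__ overloads above tie
+ # the type parameter to the stream mode, e.g.
+ #     with Popen(["cat"], stdin=PIPE, stdout=PIPE, encoding="utf-8") as proc:
+ #         out, _ = proc.communicate("hello")  # proc: Popen[str]; out: Optional[str]
+ #     proc = Popen(["ls"], stdout=PIPE)       # proc: Popen[bytes]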
+
+# The result really is always a str.
+def getstatusoutput(cmd: _TXT) -> Tuple[int, str]: ...
+def getoutput(cmd: _TXT) -> str: ...
+def list2cmdline(seq: Sequence[str]) -> str: ... # undocumented
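+# A hedged sketch (comments only): both helpers run the command through the
+# shell; getstatusoutput() returns (exit_status, output), while getoutput()
+# returns only the output text:
+#     status, text = getstatusoutput("uname -a")
+#     text = getoutput("uname -a")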
+
+if sys.platform == "win32":
+ class STARTUPINFO:
+ if sys.version_info >= (3, 7):
+ def __init__(
+ self,
+ *,
+ dwFlags: int = ...,
+ hStdInput: Optional[Any] = ...,
+ hStdOutput: Optional[Any] = ...,
+ hStdError: Optional[Any] = ...,
+ wShowWindow: int = ...,
+ lpAttributeList: Optional[Mapping[str, Any]] = ...,
+ ) -> None: ...
+ dwFlags: int
+ hStdInput: Optional[Any]
+ hStdOutput: Optional[Any]
+ hStdError: Optional[Any]
+ wShowWindow: int
+ if sys.version_info >= (3, 7):
+ lpAttributeList: Mapping[str, Any]
+ STD_INPUT_HANDLE: Any
+ STD_OUTPUT_HANDLE: Any
+ STD_ERROR_HANDLE: Any
+ SW_HIDE: int
+ STARTF_USESTDHANDLES: int
+ STARTF_USESHOWWINDOW: int
+ CREATE_NEW_CONSOLE: int
+ CREATE_NEW_PROCESS_GROUP: int
+ if sys.version_info >= (3, 7):
+ ABOVE_NORMAL_PRIORITY_CLASS: int
+ BELOW_NORMAL_PRIORITY_CLASS: int
+ HIGH_PRIORITY_CLASS: int
+ IDLE_PRIORITY_CLASS: int
+ NORMAL_PRIORITY_CLASS: int
+ REALTIME_PRIORITY_CLASS: int
+ CREATE_NO_WINDOW: int
+ DETACHED_PROCESS: int
+ CREATE_DEFAULT_ERROR_MODE: int
+ CREATE_BREAKAWAY_FROM_JOB: int
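+ # A hedged Windows-only sketch (comments only): STARTUPINFO and the flag
+ # constants above feed Popen's startupinfo/creationflags parameters; on
+ # Python 3.7+ one might write:
+ #     info = STARTUPINFO(dwFlags=STARTF_USESHOWWINDOW, wShowWindow=SW_HIDE)
+ #     Popen(["notepad.exe"], startupinfo=info, creationflags=CREATE_NO_WINDOW)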
diff --git a/windowsdoc.md b/windowsdoc.md
deleted file mode 100644
index 215fa6fd69..0000000000
--- a/windowsdoc.md
+++ /dev/null
@@ -1,142 +0,0 @@
-# Windows Compatibility
-The CWL reference runner, cwltool, is compatible with Microsoft Windows when
-Docker is installed. On Windows, all CWL CommandLineTools are executed using
-[Docker software containers](https://docs.docker.com/docker-for-windows/). The
-default Docker Container is
-[Alpine with Bash support](https://github.com/frol/docker-alpine-bash). You can
-specify other Docker Containers for your tools and workflows using hints,
-[requirements](http://www.commonwl.org/v1.0/CommandLineTool.html#DockerRequirement),
-or the `--default-container` cwltool option.
-
-## Supported Windows versions
-* Windows 10 with native [Docker for Windows](https://docs.docker.com/docker-for-windows/).
-* Windows 7, 8, and 8.1 with [Docker ToolBox](https://docs.docker.com/toolbox/toolbox_install_windows/).
-
-If you are using Docker Toolbox, then you must run cwltool in the Docker
-Quickstart Terminal.
-
-## Installation
-
-You can install cwltool using pip or directly from source code.
-
-### Requirements
-
-Before installing cwltool, please install:
-
-* [Python 3](https://www.python.org/downloads/windows/)
-* [Docker](https://docs.docker.com/docker-for-windows/install/)
-* [Node.js](https://nodejs.org/en/download/) (optional, please install if your
- workflows or tools contain [Javascript Expressions](http://www.commonwl.org/v1.0/CommandLineTool.html#InlineJavascriptRequirement))
-
-### Install using pip (recommended)
-
-```
-pip install cwltool
-```
-
-### Install from source
-
-```
-git clone https://github.com/common-workflow-language/cwltool.git
-cd cwltool
-pip install .
-```
-
-***Note:*** To check whether cwltool was installed successfully on your Windows
-system, run `cwltool` in `cmd`. If you see help text like the following, the installation succeeded.
-
-```
- CWL document required, no input file was provided
- usage: cwltool [-h] [--basedir BASEDIR] [--outdir OUTDIR] [--no-container]
- [--preserve-environment ENVVAR] [--preserve-entire-environment]
- [--rm-container | --leave-container]
- [--tmpdir-prefix TMPDIR_PREFIX]
- .......................
-```
-
-## Running tests
-
-There are two types of tests available for cwltool: unit tests and conformance tests.
-
-### Unit tests
-
-To run cwltool's unit tests, run the following command:
-```
-python3 -m pytest --pyargs cwltool
-```
-
-Or go to the checkout of the cwltool Git repository on your system and run:
-
-```
-python3 setup.py test
-```
-
-
-
-### Conformance tests
-
-To run the CWL conformance tests, follow these instructions:
-
-```
-pip install cwltest
-git clone https://github.com/common-workflow-language/common-workflow-language.git
-cd common-workflow-language/v1.0
-cwltest --test conformance_test_v1.0.yaml -j 4 --tool cwltool
-```
-The `-j` option runs multiple tests in parallel.
-
-## Troubleshooting
-
-You may encounter some problems with Docker on Windows.
-
-### Docker doesn't work on Windows, even outside cwltool
-
-Make sure you followed all instructions carefully while installing Docker.
-Please check your environment variables. If the problem persists, we recommend
-consulting the [online Docker Community](https://forums.docker.com/).
-
-### Your local drives are not being shared with Docker Containers
-
-* ***On native Docker for Windows (supported by Windows 10):***
-On your tray, next to your clock, right-click on Docker, then click on Settings;
-there you'll find the Shared Drives tab, where you can share your drives with Docker.
-If you encounter a problem with your firewall, please
-[refer to this post](https://blog.olandese.nl/2017/05/03/solve-docker-for-windows-error-a-firewall-is-blocking-file-sharing-between-windows-and-the-containers/).
-
-* ***On Docker Toolbox:***
-Docker Toolbox uses VirtualBox to create a Linux base on which the Docker machine runs.
-Your Docker container will be created inside VirtualBox. To share drives
-in VirtualBox, go to ***VirtualBox -> Settings -> Shared Folders -> Machine Folders***
-and map the drives you want to share with your Docker container.
-If you want to keep these settings permanent (recommended!), mark the
-`Make Permanent` checkbox; otherwise these settings will be erased every time
-VirtualBox closes.
-
-### In a Docker Container with shared drives, not all files are shown on `ls`
-
-This means your drives/folders are not shared properly. Docker uses caching,
-which may result in not all files and folders being listed by `ls`. To solve
-this problem, make your drive mapping settings permanent (see the previous
-question).
-
-### Can't create/modify a file in Docker when using cwltool
-
-When folders are shared with a Docker container, they inherit their current file
-access permissions. If you can write to a folder on your local machine (with
-your current privileges), you should also be able to write to that folder inside
-the Docker container, provided the same user started Docker. In short, this is a
-file permission issue.
-
-### Workflows with JavaScript expressions occasionally give timeout errors
-To evaluate JavaScript expressions, cwltool looks for Node.js on your system.
-If Node.js isn't installed, JS expressions are executed in a Docker container.
-To avoid waiting forever when an error occurs, cwltool times out JS expression
-evaluation after a timeout period (20 seconds by default). You can provide a
-custom timeout period using the `--eval-timeout` argument. So if you face this
-error, the best option is to install Node.js on your local system; if you
-can't, use the `--eval-timeout` argument to set a higher timeout value.
-
-*If you still have problems with setting up and using Docker on Windows, please
-consult the online Docker Community. If the problem is specific to cwltool,
-create an [issue on cwltool](https://github.com/common-workflow-language/cwltool/issues).*
-