---
# CI workflow: run Hugging Face transformers' DeepSpeed integration tests
# on a self-hosted V100 runner (CUDA 11.6 host image).
#
# NOTE(review): this file was recovered from a mangled paste — interleaved
# line-number artifacts were removed and indentation restored.  Spans marked
# "elided" below were NOT visible in the excerpt; confirm each against the
# original file before relying on this reconstruction.
name: nv-transformers-v100

# Trigger block only partially visible in the excerpt; the two path globs
# below appeared under it.  The `pull_request.paths` nesting is presumed —
# TODO(review): confirm the exact event/key against the original.
on:
  pull_request:
    paths:
      - 'deepspeed/inference/v2/**'
      - 'tests/unit/inference/v2/**'

# Cancel any in-flight run for the same workflow+ref when a new one starts.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  # NOTE(review): job id was elided in the excerpt; `unit-tests` is a
  # placeholder — confirm against the original.
  unit-tests:
    runs-on: [self-hosted, nvidia, cu116, v100]

    steps:
      - uses: actions/checkout@v3

      # Local composite action that prepares the Python virtualenv.
      # NOTE(review): any sibling keys of this step (e.g. `id:`) were elided.
      - id: setup-venv
        uses: ./.github/workflows/setup-venv

      - name: Install pytorch
        run: |
          # use the same pytorch version as transformers CI
          pip install -U --cache-dir $TORCH_CACHE torch==2.0.1+cu118 --index-url https://download.pytorch.org/whl/cu118
          python -c "import torch; print('torch:', torch.__version__, torch)"
          python -c "import torch; print('CUDA available:', torch.cuda.is_available())"

      - name: Install transformers
        run: |
          git clone https://github.com/huggingface/transformers
          # NOTE(review): the next line was elided in the excerpt but the
          # checkout below requires the repo cwd — confirm.
          cd transformers
          # if needed switch to the last known good SHA until transformers@master is fixed
          git checkout e7e9261a2
          git rev-parse --short HEAD

      # Each `run:` step starts back at the workspace root, so this installs
      # DeepSpeed itself (the checked-out repository), not transformers.
      - name: Install deepspeed
        run: |
          pip install .[dev,autotuning]

      - name: Python environment
        run: |
          # NOTE(review): this step's body was elided in the excerpt;
          # `pip list` is the conventional content — confirm against original.
          pip list

      - name: HF transformers tests
        run: |
          # NOTE(review): elided line presumed — the paths below are relative
          # to the transformers checkout; confirm.
          cd transformers
          unset TORCH_CUDA_ARCH_LIST # only jit compile for current arch
          pip install .[testing]
          # find reqs used in ds integration tests
          find examples/pytorch -regextype posix-egrep -regex '.*(language-modeling|question-answering|summarization|image-classification|text-classification|translation).*/requirements.txt' -exec grep -v 'torch' {} \; | xargs -I {} pip install --upgrade {}
          # force protobuf version due to issues
          pip install "protobuf<4.21.0"
          WANDB_DISABLED=true RUN_SLOW=1 pytest $PYTEST_OPTS tests/deepspeed