✈️ Introduce Jetstream/Pytorch in TGI #232
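
This PR adds the following GitHub Actions workflow: it builds and installs the TGI server on a TPU runner, then installs and tests the Jetstream/PyTorch version in a separate step.
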
name: Optimum TPU / Test TGI on TPU
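# Run on pushes to main and on pull requests that touch the TGI code.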
on:
  push:
    branches: [ main ]
    paths:
      - "text-generation-inference/**"
  pull_request:
    branches: [ main ]
    paths:
      - "text-generation-inference/**"
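
# Keep at most one run per branch/PR: in-progress runs for the same ref are cancelled.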
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  do-the-job:
    name: Run TGI tests
    runs-on: optimum-tpu
    container:
      # Use a nightly image that works with TPU (the release image was not working)
      image: us-central1-docker.pkg.dev/tpu-pytorch-releases/docker/xla@sha256:48b1d3ab080613fd88234019daf77ef7812b518acb13c54ddad03bf770d6ac57
      options: --shm-size "16gb" --ipc host --privileged
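    # PJRT_DEVICE points torch-xla's PJRT runtime at the TPU.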
    env:
      PJRT_DEVICE: TPU
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Build and test TGI server
        # run: |
        #   HF_TOKEN=${{ secrets.HF_TOKEN_OPTIMUM_TPU_CI }} make tgi_test
        run: |
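          # test_installs and tgi_server are Makefile targets from this repo;
          # the wheel built here is installed by the find command below.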
          make test_installs tgi_server
          find text-generation-inference/ -name "text_generation_server-*whl" -exec python -m pip install {} \;
      # Use a separate step to test the Jetstream PyTorch version, to avoid
      # dependency conflicts with torch-xla[tpu]
      - name: Install and test TGI server (Jetstream Pytorch)
        run: |
          pip show text-generation-server
          pip install -e .[jetstream-pt] \
            -f https://storage.googleapis.com/jax-releases/jax_nightly_releases.html \
            -f https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html \
            -f https://storage.googleapis.com/libtpu-releases/index.html
          JETSTREAM_PT=1 HF_TOKEN=${{ secrets.HF_TOKEN_OPTIMUM_TPU_CI }} python -m \
            pytest -sv text-generation-inference/tests -k jetstream
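
For debugging outside CI, the Jetstream step can be reproduced by hand on a TPU VM. This is a minimal sketch, assuming a checkout of this repository and a valid Hugging Face token; the extra name, find-links URLs, and environment variables are copied from the workflow above, and the token placeholder is hypothetical.

# Local reproduction of the Jetstream Pytorch step (a sketch, not part of the PR).
export PJRT_DEVICE=TPU

# Same install as in the workflow: the jetstream-pt extra, resolved against the
# nightly JAX/jaxlib and libtpu find-links indexes.
pip install -e .[jetstream-pt] \
    -f https://storage.googleapis.com/jax-releases/jax_nightly_releases.html \
    -f https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html \
    -f https://storage.googleapis.com/libtpu-releases/index.html

# JETSTREAM_PT=1 selects the Jetstream/Pytorch backend; -k jetstream restricts
# pytest to the Jetstream-specific tests. <your-hf-token> is a placeholder.
JETSTREAM_PT=1 HF_TOKEN=<your-hf-token> python -m pytest -sv text-generation-inference/tests -k jetstream

Running this in a fresh virtual environment mirrors the reason the workflow splits the job into two steps: the jetstream-pt extra pulls in a JAX/libtpu stack that can conflict with the torch-xla[tpu] install used by the build step.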