From f9cdb007b98b5fbc2c252d89613365187d56702f Mon Sep 17 00:00:00 2001
From: Alvaro Moran
Date: Wed, 11 Sep 2024 08:50:30 +0000
Subject: [PATCH] WIP quick ci test

---
 .github/workflows/test-pytorch-xla-tpu-tgi.yml | 18 ++++--------------
 1 file changed, 4 insertions(+), 14 deletions(-)

diff --git a/.github/workflows/test-pytorch-xla-tpu-tgi.yml b/.github/workflows/test-pytorch-xla-tpu-tgi.yml
index 4c681941..9a0ed6d2 100644
--- a/.github/workflows/test-pytorch-xla-tpu-tgi.yml
+++ b/.github/workflows/test-pytorch-xla-tpu-tgi.yml
@@ -2,9 +2,7 @@ name: Optimum TPU / Test TGI on TPU
 
 on:
   push:
-    branches: [ main ]
-    paths:
-      - "text-generation-inference/**"
+    branches: [ quick-ci-test ]
   pull_request:
     branches: [ main ]
     paths:
@@ -28,16 +26,8 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
 
-      - name: Build and test TGI server
+      - name: quick test
         run: |
-          HF_TOKEN=${{ secrets.HF_TOKEN_OPTIMUM_TPU_CI }} make tgi_test
+          python -m pip install huggingface_hub
+          HF_TOKEN=${{ secrets.HF_TOKEN_OPTIMUM_TPU_CI }} python -c "from huggingface_hub import snapshot_download; snapshot_download(repo_id='google/gemma-2b', allow_patterns='*.json')"
 
-      # Use a different step to test the Jetstream Pytorch version, to avoid conflicts with torch-xla[tpu]
-      - name: Install and test TGI server (Jetstream Pytorch)
-        run: |
-          pip install -U .[jetstream-pt] \
-            -f https://storage.googleapis.com/jax-releases/jax_nightly_releases.html \
-            -f https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html \
-            -f https://storage.googleapis.com/libtpu-releases/index.html
-          JETSTREAM_PT=1 HF_TOKEN=${{ secrets.HF_TOKEN_OPTIMUM_TPU_CI }} python -m \
-            pytest -sv text-generation-inference/tests -k jetstream
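
The replacement "quick test" step only verifies that the CI runner can reach the
Hugging Face Hub with the provided token and pull the JSON metadata of
google/gemma-2b, rather than building and testing the TGI server. A minimal
local sketch of the same check, assuming huggingface_hub is installed and an
HF_TOKEN environment variable with read access to the repo is exported, could
look like this:

    # Sketch of the CI quick test: fetch only the JSON files (config and
    # tokenizer metadata) of google/gemma-2b to confirm Hub access works.
    # Assumes `pip install huggingface_hub` and an exported HF_TOKEN.
    from huggingface_hub import snapshot_download

    local_dir = snapshot_download(repo_id="google/gemma-2b", allow_patterns="*.json")
    print(f"Downloaded JSON files to {local_dir}")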