Skip to content

Commit

Permalink
wip
Browse files Browse the repository at this point in the history
Signed-off-by: Raphael Glon <oOraph@users.noreply.github.com>
  • Loading branch information
oOraph committed Nov 29, 2023
1 parent 2964ade commit e5aa9e7
Show file tree
Hide file tree
Showing 3 changed files with 25 additions and 30 deletions.
16 changes: 14 additions & 2 deletions docker_images/diffusers/app/pipelines/image_to_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
)
from huggingface_hub import hf_hub_download, model_info
from huggingface_hub import file_download, hf_api, hf_hub_download, model_info
from PIL import Image


Expand All @@ -36,7 +36,19 @@
class ImageToImagePipeline(Pipeline):
def __init__(self, model_id: str):
use_auth_token = os.getenv("HF_API_TOKEN")
model_data = model_info(model_id, token=use_auth_token)

# This should allow the Docker image to work with private models even when no token is
# provided, as long as the model's hub metadata is already present in the local cache
cache_folder_name = file_download.repo_folder_name(repo_id=model_id, repo_type="model")
filename = os.path.join(cache_folder_name, "hub_model_info.json")
try:
with open(filename, 'r') as f:
model_data = json.load(f)
except OSError:
logger.info("No cached model info found in %s for model %s. Fetching on the hub", filename, model_id)
model_data = model_info(model_id, token=self.use_auth_token)
else:
model_data = hf_api.ModelInfo(**model_data)

kwargs = (
{"safety_checker": None}
Expand Down
32 changes: 11 additions & 21 deletions docker_images/diffusers/app/pipelines/text_to_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from typing import TYPE_CHECKING

import torch
from app import idle, lora, timing, validation
from app import idle, lora, timing
from app.pipelines import Pipeline
from diffusers import (
AutoencoderKL,
Expand All @@ -27,29 +27,19 @@ def __init__(self, model_id: str):
self.current_lora_adapter = None
self.model_id = None
self.use_auth_token = os.getenv("HF_API_TOKEN")

# This should allow the Docker image to work with private models even when no token is
# provided, as long as the model's hub metadata is already present in the local cache
self.offline = validation.str_to_bool(os.getenv("HF_OFFLINE_CACHE", "0"))
if self.offline:
folder_name = file_download.repo_folder_name(repo_id=model_id, repo_type="model")
filename = os.path.join(folder_name, "hub_model_info.json")
try:
with open(filename, 'r') as f:
model_data = json.load(f)
except OSError as e1:
try:
model_data = model_info(model_id, token=self.use_auth_token)
except Exception as e2:
logger.exception(e1)
logger.exception(e2)
logger.error("If you want this image to run with cached private models without providing a token "
"or hitting the hub, you first need to store the json output of %s/api/models/%s "
"into %s",
hf_api.api.endpoint, filename)
raise Exception("Unable to get model_info for %s, missing local cache or hub access", model_id)
model_data = hf_api.ModelInfo(**model_data)
else:
cache_folder_name = file_download.repo_folder_name(repo_id=model_id, repo_type="model")
filename = os.path.join(cache_folder_name, "hub_model_info.json")
try:
with open(filename, 'r') as f:
model_data = json.load(f)
except OSError:
logger.info("No cached model info found in %s for model %s. Fetching on the hub", filename, model_id)
model_data = model_info(model_id, token=self.use_auth_token)
else:
model_data = hf_api.ModelInfo(**model_data)

kwargs = (
{"safety_checker": None}
Expand Down
7 changes: 0 additions & 7 deletions docker_images/diffusers/app/validation.py

This file was deleted.

0 comments on commit e5aa9e7

Please sign in to comment.