diff --git a/docker_images/diffusers/app/pipelines/image_to_image.py b/docker_images/diffusers/app/pipelines/image_to_image.py index 6c921b2e..bbd5e1f0 100644 --- a/docker_images/diffusers/app/pipelines/image_to_image.py +++ b/docker_images/diffusers/app/pipelines/image_to_image.py @@ -39,13 +39,19 @@ def __init__(self, model_id: str): # This should allow us to make the image work with private models when no token is provided, if the said model # is already in local cache - cache_folder_name = file_download.repo_folder_name(repo_id=model_id, repo_type="model") + cache_folder_name = file_download.repo_folder_name( + repo_id=model_id, repo_type="model" + ) filename = os.path.join(cache_folder_name, "hub_model_info.json") try: - with open(filename, 'r') as f: + with open(filename, "r") as f: model_data = json.load(f) except OSError: - logger.info("No cached model info found in %s for model %s. Fetching on the hub", filename, model_id) + logger.info( + "No cached model info found in %s for model %s. Fetching on the hub", + filename, + model_id, + ) model_data = model_info(model_id, token=self.use_auth_token) else: model_data = hf_api.ModelInfo(**model_data) diff --git a/docker_images/diffusers/app/pipelines/text_to_image.py b/docker_images/diffusers/app/pipelines/text_to_image.py index 7c92a5c9..4b6d46d3 100644 --- a/docker_images/diffusers/app/pipelines/text_to_image.py +++ b/docker_images/diffusers/app/pipelines/text_to_image.py @@ -30,13 +30,19 @@ def __init__(self, model_id: str): # This should allow us to make the image work with private models when no token is provided, if the said model # is already in local cache - cache_folder_name = file_download.repo_folder_name(repo_id=model_id, repo_type="model") + cache_folder_name = file_download.repo_folder_name( + repo_id=model_id, repo_type="model" + ) filename = os.path.join(cache_folder_name, "hub_model_info.json") try: - with open(filename, 'r') as f: + with open(filename, "r") as f: model_data = json.load(f) except OSError: - logger.info("No cached model info found in %s for model %s. Fetching on the hub", filename, model_id) + logger.info( + "No cached model info found in %s for model %s. Fetching on the hub", + filename, + model_id, + ) model_data = model_info(model_id, token=self.use_auth_token) else: model_data = hf_api.ModelInfo(**model_data) @@ -57,7 +63,9 @@ def __init__(self, model_id: str): model_type = "LoraModel" elif has_model_index: config_file = hf_hub_download( - model_id, "model_index.json", token=self.use_auth_token, local_files_only=self.offline, + model_id, + "model_index.json", + token=self.use_auth_token, ) with open(config_file, "r") as f: config_dict = json.load(f)