# This file was autogenerated by uv via the following command:
# uv pip compile requirements.in -o requirements.txt
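#
# A sketch of the requirements.in this lockfile was likely compiled from,
# inferred from the "# via -r requirements.in" annotations below. Any version
# constraints in the actual requirements.in are unknown, so names are left
# unpinned here:
#
#   aiohttp
#   fastapi
#   prometheus-client
#   prometheus-fastapi-instrumentator
#   py-consul
#   sentencepiece
#   torch
#   transformers
#   uvicorn
#
# To install this pinned set (assuming uv is available; plain pip also works):
#   uv pip install -r requirements.txt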
aiohappyeyeballs==2.4.3
# via aiohttp
aiohttp==3.10.11
# via -r requirements.in
aiosignal==1.3.1
# via aiohttp
annotated-types==0.7.0
# via pydantic
anyio==4.6.2.post1
# via starlette
attrs==24.2.0
# via aiohttp
certifi==2024.8.30
# via requests
charset-normalizer==3.4.0
# via requests
click==8.1.7
# via uvicorn
fastapi==0.115.3
# via -r requirements.in
filelock==3.16.1
# via
# huggingface-hub
# torch
# transformers
# triton
frozenlist==1.5.0
# via
# aiohttp
# aiosignal
fsspec==2024.10.0
# via
# huggingface-hub
# torch
h11==0.14.0
# via uvicorn
huggingface-hub==0.26.1
# via
# tokenizers
# transformers
idna==3.10
# via
# anyio
# requests
# yarl
jinja2==3.1.4
# via torch
markupsafe==3.0.2
# via jinja2
mpmath==1.3.0
# via sympy
multidict==6.1.0
# via
# aiohttp
# yarl
networkx==3.4.2
# via torch
numpy==2.1.2
# via transformers
nvidia-cublas-cu12==12.1.3.1
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12==12.1.105
# via torch
nvidia-cuda-nvrtc-cu12==12.1.105
# via torch
nvidia-cuda-runtime-cu12==12.1.105
# via torch
nvidia-cudnn-cu12==9.1.0.70
# via torch
nvidia-cufft-cu12==11.0.2.54
# via torch
nvidia-curand-cu12==10.3.2.106
# via torch
nvidia-cusolver-cu12==11.4.5.107
# via torch
nvidia-cusparse-cu12==12.1.0.106
# via
# nvidia-cusolver-cu12
# torch
nvidia-nccl-cu12==2.20.5
# via torch
nvidia-nvjitlink-cu12==12.6.85
# via
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvtx-cu12==12.1.105
# via torch
packaging==24.1
# via
# huggingface-hub
# transformers
prometheus-client==0.21.0
# via
# -r requirements.in
# prometheus-fastapi-instrumentator
prometheus-fastapi-instrumentator==7.0.0
# via -r requirements.in
propcache==0.2.0
# via yarl
py-consul==1.5.3
# via -r requirements.in
pydantic==2.9.2
# via fastapi
pydantic-core==2.23.4
# via pydantic
pyyaml==6.0.2
# via
# huggingface-hub
# transformers
regex==2024.9.11
# via transformers
requests==2.32.3
# via
# huggingface-hub
# py-consul
# transformers
safetensors==0.4.5
# via transformers
sentencepiece==0.2.0
# via -r requirements.in
sniffio==1.3.1
# via anyio
starlette==0.41.0
# via
# fastapi
# prometheus-fastapi-instrumentator
sympy==1.13.3
# via torch
tokenizers==0.20.1
# via transformers
torch==2.4.1
# via -r requirements.in
tqdm==4.66.5
# via
# huggingface-hub
# transformers
transformers==4.46.1
# via -r requirements.in
triton==3.0.0
# via torch
typing-extensions==4.12.2
# via
# fastapi
# huggingface-hub
# pydantic
# pydantic-core
# torch
urllib3==2.2.3
# via requests
uvicorn==0.32.0
# via -r requirements.in
yarl==1.16.0
# via aiohttp