Skip to content

Commit

Permalink
Fix after rebasing; the test is not working yet
Browse files — browse the repository at this point in the history
  • Loading branch information
V2arK committed Feb 4, 2025
1 parent f9f9101 commit 3a224f5
Show file tree
Hide file tree
Showing 4 changed files with 30 additions and 14 deletions.
18 changes: 13 additions & 5 deletions llama_stack/providers/registry/inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,15 +216,23 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
adapter_type="centml",
adapter_type="runpod",
pip_packages=["openai"],
module="llama_stack.providers.remote.inference.runpod",
config_class=
"llama_stack.providers.remote.inference.runpod.RunpodImplConfig",
),
),
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
adapter_type="sambanova",
pip_packages=[
"openai",
],
module="llama_stack.providers.remote.inference.centml",
module="llama_stack.providers.remote.inference.sambanova",
config_class=
"llama_stack.providers.remote.inference.centml.CentMLImplConfig",
provider_data_validator=
"llama_stack.providers.remote.inference.centml.CentMLProviderDataValidator",
"llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig",
),
),
remote_provider_spec(
Expand Down
4 changes: 4 additions & 0 deletions llama_stack/providers/remote/inference/centml/centml.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
ChatCompletionRequest,
ChatCompletionResponse,
CompletionRequest,
CompletionResponse,
EmbeddingsResponse,
Inference,
LogProbConfig,
Expand All @@ -25,6 +26,7 @@
ResponseFormatType,
SamplingParams,
ToolChoice,
ToolConfig,
ToolDefinition,
ToolPromptFormat,
)
Expand All @@ -42,6 +44,7 @@
process_completion_stream_response,
)
from llama_stack.providers.utils.inference.prompt_adapter import (
chat_completion_request_to_prompt,
completion_request_to_prompt,
content_has_media,
interleaved_content_as_str,
Expand Down Expand Up @@ -176,6 +179,7 @@ async def chat_completion(
response_format: Optional[ResponseFormat] = None,
stream: Optional[bool] = False,
logprobs: Optional[LogProbConfig] = None,
tool_config: Optional[ToolConfig] = None,
) -> AsyncGenerator:
"""
For "chat completion" style requests.
Expand Down
5 changes: 3 additions & 2 deletions llama_stack/templates/centml/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ distribution_spec:
providers:
inference:
- remote::centml
memory:
vector_io:
- inline::faiss
- remote::chromadb
- remote::pgvector
Expand All @@ -28,5 +28,6 @@ distribution_spec:
- remote::brave-search
- remote::tavily-search
- inline::code-interpreter
- inline::memory-runtime
- inline::rag-runtime
- remote::model-context-protocol
image_type: conda
17 changes: 10 additions & 7 deletions llama_stack/templates/centml/run.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,11 @@ apis:
- datasetio
- eval
- inference
- memory
- safety
- scoring
- telemetry
- tool_runtime
- vector_io
providers:
inference:
- provider_id: centml
Expand All @@ -22,7 +22,7 @@ providers:
provider_type: inline::sentence-transformers
config: {}

memory:
vector_io:
- provider_id: faiss
provider_type: inline::faiss
config:
Expand Down Expand Up @@ -92,8 +92,11 @@ providers:
- provider_id: code-interpreter
provider_type: inline::code-interpreter
config: {}
- provider_id: memory-runtime
provider_type: inline::memory-runtime
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}

metadata_store:
Expand All @@ -116,14 +119,14 @@ models:
shields:
- shield_id: meta-llama/Llama-Guard-3-8B

memory_banks: []
vector_dbs: []
datasets: []
scoring_fns: []
eval_tasks: []
tool_groups:
- toolgroup_id: builtin::websearch
provider_id: tavily-search
- toolgroup_id: builtin::memory
provider_id: memory-runtime
- toolgroup_id: builtin::rag
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter

0 comments on commit 3a224f5

Please sign in to comment.