
Commit 952589e: change doc
NathanHB committed Feb 6, 2025
1 parent c75869b

Showing 4 changed files with 23 additions and 2 deletions.
2 changes: 2 additions & 0 deletions docs/source/use-vllm-as-backend.mdx
@@ -29,6 +29,8 @@ lighteval vllm \
"leaderboard|truthfulqa:mc|0|0"
```

+## Use a config file
+
For more advanced configurations, you can use a config file for the model.
An example of a config file is shown below and can be found at `examples/model_configs/vllm_model_config.yaml`.

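As a sketch of how this config file would be used (my illustration, not part of the commit): assuming `lighteval vllm` accepts a YAML path in place of the inline model-argument string, which is how the updated docs read, an invocation might look like:

```bash
lighteval vllm \
    "examples/model_configs/vllm_model_config.yaml" \
    "leaderboard|truthfulqa:mc|0|0"
```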
4 changes: 3 additions & 1 deletion examples/model_configs/vllm_model_config.yaml
@@ -7,6 +7,8 @@ model:
frequency_penalty: 0.0
presence_penalty: 0.0
seed: 42
-top_k: 0
+top_k: -1
+min_p: 0.0
+top_p: 0.9
max_new_tokens: 100
stop_tokens: ["<EOS>", "<PAD>"]
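The `top_k` change is worth a note (this reasoning is mine, not spelled out in the diff): in the vLLM releases the docstrings link to, `SamplingParams` treats `-1` as "top-k disabled" and rejects `0` during validation, so `0` was likely never a valid default. A quick check, assuming a local vLLM install:

```python
from vllm import SamplingParams

SamplingParams(top_k=-1)  # accepted: -1 disables top-k filtering
SamplingParams(top_k=0)   # raises ValueError: top_k must be -1 (disable) or at least 1
```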
17 changes: 17 additions & 0 deletions src/lighteval/models/model_input.py
@@ -59,6 +59,23 @@ def from_dict(cls, config_dict: dict):
"""
return GenerationParameters(**config_dict.get("generation", {}))

+    def to_vllm_dict(self) -> dict:
+        """Selects relevant generation and sampling parameters for vllm models.
+        Doc: https://docs.vllm.ai/en/v0.5.5/dev/sampling_params.html
+        Returns:
+            dict: The parameters used to create a vllm.SamplingParams in the model config.
+        """
+        sampling_params_to_vllm_naming = {
+            "max_new_tokens": "max_tokens",
+            "min_new_tokens": "min_tokens",
+            "stop_tokens": "stop",
+        }
+
+        # Task-specific sampling params to set in the model: n, best_of, use_beam_search
+        # Generation-specific params to set in the model: logprobs, prompt_logprobs
+        return {sampling_params_to_vllm_naming.get(k, k): v for k, v in asdict(self).items() if v is not None}
+
    def to_vllm_openai_dict(self) -> dict:
        """Selects relevant generation and sampling parameters for vllm and openai models.
        Doc: https://docs.vllm.ai/en/v0.5.5/dev/sampling_params.html
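To make the renaming concrete, here is an illustrative sketch (mine, not part of the commit) of how the new method behaves, assuming the remaining `GenerationParameters` fields default to `None`:

```python
from lighteval.models.model_input import GenerationParameters

# Build parameters the same way from_dict does, then convert for vLLM.
params = GenerationParameters.from_dict(
    {"generation": {"max_new_tokens": 100, "top_k": -1, "stop_tokens": ["<EOS>", "<PAD>"]}}
)

# lighteval names are mapped to vLLM's vocabulary; None-valued fields are dropped.
print(params.to_vllm_dict())
# {'max_tokens': 100, 'top_k': -1, 'stop': ['<EOS>', '<PAD>']}
```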
2 changes: 1 addition & 1 deletion src/lighteval/models/vllm/vllm_model.py
@@ -128,7 +128,7 @@ def __init__(
        self.precision = _get_dtype(config.dtype, config=self._config)

        self.model_info = ModelInfo(model_name=self.model_name, model_sha=self.model_sha)
-        self.sampling_params = SamplingParams(**config.generation_parameters.to_vllm_openai_dict())
+        self.sampling_params = SamplingParams(**config.generation_parameters.to_vllm_dict())
        self.pairwise_tokenization = config.pairwise_tokenization

    @property
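The effect of the switch, sketched under the assumption that `to_vllm_dict` produces keys like those shown above (again, my illustration, not the commit's code): `vllm.SamplingParams` expects vLLM's own parameter names, so the dict must already be renamed when it is unpacked.

```python
from vllm import SamplingParams

# Keys as produced by to_vllm_dict(): already renamed to vLLM's vocabulary.
vllm_kwargs = {"max_tokens": 100, "min_p": 0.0, "top_p": 0.9, "top_k": -1, "stop": ["<EOS>", "<PAD>"]}

# Equivalent to the changed line in __init__ above.
sampling_params = SamplingParams(**vllm_kwargs)
```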
