Fix loading of vllm model from files (#533)
* commit

* commit

* Update src/lighteval/main_vllm.py

* commit

* change doc

* change doc

* change doc
NathanHB authored Feb 10, 2025
1 parent 86f6225 commit d4e6f59
Showing 5 changed files with 62 additions and 19 deletions.
39 changes: 25 additions & 14 deletions docs/source/use-vllm-as-backend.mdx
@@ -29,20 +29,31 @@ lighteval vllm \
"leaderboard|truthfulqa:mc|0|0"
```

Available arguments for the `vllm` backend are defined in `VLLMModelConfig` and are passed as a comma-separated list of `key=value` pairs (see the example after this list):

- **pretrained** (str): HuggingFace Hub model ID name or the path to a pre-trained model to load.
- **gpu_memory_utilisation** (float): The fraction of GPU memory to use.
- **revision** (str): The revision of the model.
- **dtype** (str, None): The data type to use for the model.
- **tensor_parallel_size** (int): The number of tensor parallel units to use.
- **data_parallel_size** (int): The number of data parallel units to use.
- **max_model_length** (int): The maximum sequence length (context size) the model will handle.
- **swap_space** (int): The CPU swap space size (GiB) per GPU.
- **seed** (int): The seed to use for the model.
- **trust_remote_code** (bool): Whether to trust remote code during model loading.
- **add_special_tokens** (bool): Whether to add special tokens to the input sequences.
- **multichoice_continuations_start_space** (bool): Whether to add a space at the start of each continuation in multichoice generation.
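
For illustration, these arguments are combined into a single comma-separated string on the command line; the model name and values below are placeholders, not part of this change:

```bash
lighteval vllm \
    "pretrained=HuggingFaceTB/SmolLM-1.7B,dtype=bfloat16,max_model_length=2048" \
    "leaderboard|truthfulqa:mc|0|0"
```
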
## Use a config file

For more advanced configurations, you can use a config file for the model instead of a plain argument string.
An example config file is shown below; it can also be found at `examples/model_configs/vllm_model_config.yaml`.

```bash
lighteval vllm \
"examples/model_configs/vllm_model_config.yaml" \
"leaderboard|truthfulqa:mc|0|0"
```

```yaml
model: # Model specific parameters
  base_params:
    model_args: "pretrained=HuggingFaceTB/SmolLM-1.7B,revision=main,dtype=bfloat16" # Model args that you would pass in the command line
  generation: # Generation specific parameters
    temperature: 0.3
    repetition_penalty: 1.0
    frequency_penalty: 0.0
    presence_penalty: 0.0
    seed: 42
    top_k: 0
    min_p: 0.0
    top_p: 0.9
```
> [!WARNING]
> In the case of OOM issues, you might need to reduce the context size of the
14 changes: 14 additions & 0 deletions examples/model_configs/vllm_model_config.yaml
@@ -0,0 +1,14 @@
model:
  base_params:
    model_args: "pretrained=HuggingFaceTB/SmolLM-1.7B,revision=main,dtype=bfloat16" # pretrained=model_name,trust_remote_code=boolean,revision=revision_to_use,model_parallel=True ...
  generation:
    temperature: 0.3
    repetition_penalty: 1.0
    frequency_penalty: 0.0
    presence_penalty: 0.0
    seed: 42
    top_k: -1
    min_p: 0.0
    top_p: 0.9
    max_new_tokens: 100
    stop_tokens: ["<EOS>", "<PAD>"]
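
For reference, a minimal sketch of how such a config file is consumed, mirroring the updated loader in `src/lighteval/main_vllm.py` shown below; the import paths are inferred from the file paths in this commit:

```python
import yaml

from lighteval.models.model_input import GenerationParameters
from lighteval.models.vllm.vllm_model import VLLMModelConfig

# Read the "model" section of the YAML file, as the loader does.
with open("examples/model_configs/vllm_model_config.yaml", "r") as f:
    config = yaml.safe_load(f)["model"]

# The model arguments stay a comma-separated string; generation parameters
# are parsed from the "generation" sub-section.
model_args = config["base_params"]["model_args"]
generation_parameters = GenerationParameters.from_dict(config)

model_args_dict = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in model_args.split(",")}
model_config = VLLMModelConfig(**model_args_dict, generation_parameters=generation_parameters)
```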
9 changes: 5 additions & 4 deletions src/lighteval/main_vllm.py
@@ -133,12 +133,13 @@ def vllm(
    if model_args.endswith(".yaml"):
        with open(model_args, "r") as f:
            config = yaml.safe_load(f)["model"]
        model_args = config["base_params"]["model_args"]
        generation_parameters = GenerationParameters.from_dict(config)
        model_config = VLLMModelConfig(config, generation_parameters=generation_parameters)

    else:
        model_args_dict: dict = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in model_args.split(",")}
        model_config = VLLMModelConfig(**model_args_dict)
        generation_parameters = GenerationParameters()

    model_args_dict: dict = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in model_args.split(",")}
    model_config = VLLMModelConfig(**model_args_dict, generation_parameters=generation_parameters)

    pipeline = Pipeline(
        tasks=tasks,
17 changes: 17 additions & 0 deletions src/lighteval/models/model_input.py
@@ -59,6 +59,23 @@ def from_dict(cls, config_dict: dict):
"""
return GenerationParameters(**config_dict.get("generation", {}))

def to_vllm_dict(self) -> dict:
"""Selects relevant generation and sampling parameters for vllm models.
Doc: https://docs.vllm.ai/en/v0.5.5/dev/sampling_params.html
Returns:
dict: The parameters to create a vllm.SamplingParams in the model config.
"""
sampling_params_to_vllm_naming = {
"max_new_tokens": "max_tokens",
"min_new_tokens": "min_tokens",
"stop_tokens": "stop",
}

# Task specific sampling params to set in model: n, best_of, use_beam_search
# Generation specific params to set in model: logprobs, prompt_logprobs
return {sampling_params_to_vllm_naming.get(k, k): v for k, v in asdict(self).items() if v is not None}

def to_vllm_openai_dict(self) -> dict:
"""Selects relevant generation and sampling parameters for vllm and openai models.
Doc: https://docs.vllm.ai/en/v0.5.5/dev/sampling_params.html
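
As a rough illustration of the renaming done by the new `to_vllm_dict` above, assuming `GenerationParameters` is a dataclass whose unset fields default to `None` (and are therefore dropped):

```python
from lighteval.models.model_input import GenerationParameters

# Hypothetical values; only the field names come from this diff.
params = GenerationParameters(temperature=0.3, top_p=0.9, max_new_tokens=100, stop_tokens=["<EOS>", "<PAD>"])
print(params.to_vllm_dict())
# Expected shape: {'temperature': 0.3, 'top_p': 0.9, 'max_tokens': 100, 'stop': ['<EOS>', '<PAD>']}
```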
2 changes: 1 addition & 1 deletion src/lighteval/models/vllm/vllm_model.py
@@ -129,7 +129,7 @@ def __init__(
        self.precision = _get_dtype(config.dtype, config=self._config)

        self.model_info = ModelInfo(model_name=self.model_name, model_sha=self.model_sha)
        self.sampling_params = SamplingParams(**config.generation_parameters.to_vllm_openai_dict())
        self.sampling_params = SamplingParams(**config.generation_parameters.to_vllm_dict())
        self.pairwise_tokenization = config.pairwise_tokenization

    @property
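
The resulting dictionary is unpacked straight into vllm's `SamplingParams`; roughly equivalent to the following, with illustrative values:

```python
from vllm import SamplingParams

# The keys below are what to_vllm_dict() produces after renaming.
sampling_params = SamplingParams(temperature=0.3, top_p=0.9, max_tokens=100, stop=["<EOS>", "<PAD>"])
```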
