-
Notifications
You must be signed in to change notification settings - Fork 10
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
9be605e
commit 5bd73b7
Showing
51 changed files
with
1,326 additions
and
16 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -51,3 +51,5 @@ pytorch_forecasting==1.0.0 | |
patool | ||
openpyxl==3.1.5 | ||
GitPython==3.1.44 | ||
kornia==0.8.0 | ||
mlp-mixer-pytorch==0.2.0 |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,64 @@ | ||
# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC | ||
# | ||
# SPDX-License-Identifier: Apache-2.0 | ||
import pytest | ||
import torch | ||
from transformers import AutoModelForCausalLM, AutoTokenizer | ||
|
||
import forge | ||
from forge.verify.verify import verify | ||
|
||
from test.models.utils import Framework, Source, Task, build_module_name | ||
from test.utils import download_model | ||
|
||
|
||
class Wrapper(torch.nn.Module):
    """Adapter that bypasses the past-key-values argument of the wrapped model.

    The underlying HF causal-LM is always invoked positionally as
    ``model(input_ids, None, attention_mask)``, pinning past key values
    to ``None`` so the traced signature stays fixed.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, input_ids, attention_mask):
        # Past key values are deliberately passed as None.
        return self.model(input_ids, None, attention_mask)
|
||
|
||
@pytest.mark.nightly
@pytest.mark.parametrize("variant", ["bigscience/bloom-1b1"])
def test_bloom(record_forge_property, variant):
    """Compile a HuggingFace Bloom causal-LM checkpoint with Forge and verify it.

    Loads tokenizer and model for *variant*, wraps the model so past key
    values are bypassed, compiles with Forge on a fixed tokenized prompt,
    and checks the compiled module against the framework model.
    """
    # Name under which the compiled module is registered and reported.
    module_name = build_module_name(
        framework=Framework.PYTORCH,
        model="bloom",
        variant=variant,
        source=Source.HUGGINGFACE,
        task=Task.CAUSAL_LM,
    )
    record_forge_property("model_name", module_name)

    # Load tokenizer (left padding) and a cache-free, tuple-returning model.
    tokenizer = download_model(AutoTokenizer.from_pretrained, variant, padding_side="left")
    model = download_model(AutoModelForCausalLM.from_pretrained, variant, use_cache=False, return_dict=False)
    model.eval()
    framework_model = Wrapper(model)

    # Tokenize a fixed prompt, padded/truncated to a 32-token window.
    encoded = tokenizer.encode_plus(
        "This is a sample text from ",
        return_tensors="pt",
        max_length=32,
        padding="max_length",
        add_special_tokens=True,
        truncation=True,
    )
    inputs = [encoded["input_ids"], encoded["attention_mask"]]

    # Compile, then compare compiled vs. framework outputs.
    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    verify(inputs, framework_model, compiled_model)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,64 @@ | ||
# SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC | ||
|
||
# SPDX-License-Identifier: Apache-2.0 | ||
# Reference: https://huggingface.co/state-spaces/mamba-2.8b-hf | ||
|
||
import pytest | ||
import torch | ||
from transformers import AutoTokenizer, MambaForCausalLM | ||
|
||
import forge | ||
from forge.verify.verify import verify | ||
|
||
from test.models.utils import Framework, Source, Task, build_module_name | ||
from test.utils import download_model | ||
|
||
|
||
class Wrapper(torch.nn.Module):
    """Expose only the primary output tensor of the wrapped model.

    The underlying model may return extra items (cache or other
    additional outputs); only element 0 of its result is returned.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, input_ids):
        # Keep just the first element; discard cache/extras.
        return self.model(input_ids)[0]
|
||
|
||
# HF checkpoint names of the Mamba variants this test file knows about.
# NOTE: only mamba-790m-hf is currently exercised; the others are skipped
# inside test_mamba.
variants = [
    "state-spaces/mamba-790m-hf",
    "state-spaces/mamba-2.8b-hf",
    "state-spaces/mamba-1.4b-hf",
    "state-spaces/mamba-370m-hf",
]
|
||
|
||
@pytest.mark.nightly
@pytest.mark.parametrize("variant", variants)
def test_mamba(record_forge_property, variant):
    """Compile a HuggingFace Mamba causal-LM checkpoint with Forge and verify it.

    Only the mamba-790m-hf variant is currently run; every other
    parametrized variant is skipped.
    """
    if variant != "state-spaces/mamba-790m-hf":
        pytest.skip("Skipping this variant; only testing the base model (mamba-790m-hf) for now.")

    # Name under which the compiled module is registered and reported.
    module_name = build_module_name(
        framework=Framework.PYTORCH, model="mamba", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
    )
    record_forge_property("model_name", module_name)

    # Load tokenizer and model, wrap to expose a single tensor output.
    tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
    model = download_model(MambaForCausalLM.from_pretrained, variant)
    model.eval()
    framework_model = Wrapper(model)

    # Single-tensor input: token ids for a fixed prompt.
    encoded = tokenizer("Hey how are you doing?", return_tensors="pt")
    inputs = [encoded["input_ids"]]

    # Compile, then compare compiled vs. framework outputs.
    compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    verify(inputs, framework_model, compiled_model)
Empty file.
Oops, something went wrong.