Automatically calculate perplexity metrics for model supportability #1008

Status: Open · wants to merge 4 commits into base: main
44 changes: 44 additions & 0 deletions tools/python/model_validation/perplexity_metrics.py
@@ -0,0 +1,44 @@
from datasets import load_dataset
import numpy as np
import onnxruntime_genai as og
import torch

def get_wikitext2():
    # Load the wikitext-2 (raw) test split used for the perplexity evaluation.
    testdata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test')
    return testdata
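
For a sense of what this returns, a quick peek (printed values are illustrative; wikitext-2 is line-oriented, so many rows are empty strings or headings, which matters for the batching discussion below):

```python
ds = get_wikitext2()
print(ds)                   # a datasets.Dataset with a single "text" column
print(repr(ds[0]["text"]))  # often '' (blank separator lines)
print(repr(ds[1]["text"]))  # e.g. a ' = Heading = ' style line
```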

def perplexity_eval(model_dir):
    model = og.Model(f'{model_dir}')
    tokenizer = og.Tokenizer(model)

    dataset = get_wikitext2()

    # Running totals across every scored token in the dataset.
    total_log_probs = 0
    total_token_count = 0

    for batch in dataset:

Review comment on `for batch in dataset:`: concatenate all batches (see the sketch below).
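
A sketch of what that suggestion could look like (the join separator and window size are assumptions, not part of this PR):

```python
# Concatenate the whole test split into one long text, then score it in
# fixed-size windows, instead of scoring each (often tiny or empty) row alone.
text = "\n\n".join(row["text"] for row in dataset)
input_ids = tokenizer.encode(text)

max_len = 2048  # assumed context window
chunks = [input_ids[i:i + max_len] for i in range(0, len(input_ids), max_len)]
```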

        text = batch["text"]

        input_ids = tokenizer.encode_batch([text])

        params = og.GeneratorParams(model)
        generator = og.Generator(model, params)

        logits = generator.compute_logits("logits")

        targets = np.roll(input_ids, -1, axis=1)

Review comment on `np.roll(input_ids, -1, axis=1)`: what does it do?
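
A quick illustration of what that call does (example values are mine, not from the PR):

```python
import numpy as np

input_ids = np.array([[11, 22, 33, 44]])
targets = np.roll(input_ids, -1, axis=1)
print(targets)  # [[22 33 44 11]]

# Column i now holds the token from column i+1, i.e. the next-token target.
# The last column wraps around to the row's first token, so that position
# pairs the final logit with the sequence start and should be masked out
# of the loss.
```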


        # Use LogSoftMax here
        log_probs = torch.nn.functional.log_softmax(torch.tensor(logits), dim=1).numpy()

Review comment on `dim=1`: probably `-1` (see the check below).
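
To illustrate the reviewer's point, assuming the logits are shaped [batch, seq_len, vocab_size]: `dim=1` normalizes across sequence positions, while `dim=-1` normalizes across the vocabulary, which is what next-token log-probabilities require. A minimal check:

```python
import torch

logits = torch.randn(1, 4, 100)  # hypothetical [batch, seq_len, vocab_size] logits

over_seq = torch.nn.functional.log_softmax(logits, dim=1)     # normalizes over positions
over_vocab = torch.nn.functional.log_softmax(logits, dim=-1)  # normalizes over the vocab

# A proper distribution over next tokens sums to 1 across the vocabulary:
print(over_vocab.exp().sum(dim=-1))  # all ~1.0
print(over_seq.exp().sum(dim=-1))    # generally not 1.0
```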


        batch_size, seq_length = targets.shape
        target_log_probs = log_probs[np.arange(batch_size)[:, None], np.arange(seq_length), targets]

Review comment on the `target_log_probs` indexing line: this is the critical part.
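
A small sketch (with made-up shapes) of what that advanced-indexing expression selects:

```python
import numpy as np

batch_size, seq_length, vocab = 2, 3, 5
rng = np.random.default_rng(0)
log_probs = rng.standard_normal((batch_size, seq_length, vocab))
targets = rng.integers(0, vocab, size=(batch_size, seq_length))

# Fancy indexing: result[b, s] == log_probs[b, s, targets[b, s]]
gathered = log_probs[np.arange(batch_size)[:, None], np.arange(seq_length), targets]

# The same selection as an explicit loop, to show what is being picked out:
check = np.array([[log_probs[b, s, targets[b, s]] for s in range(seq_length)]
                  for b in range(batch_size)])
assert np.allclose(gathered, check)
```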


        total_log_probs += np.sum(target_log_probs)
        total_token_count += targets.size

    avg_log_prob = total_log_probs / total_token_count
    perplexity = np.exp(-avg_log_prob)

    print(f"The perplexity of {model_dir} is {perplexity}")
    return perplexity
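
For reference, the quantity computed above is the standard corpus perplexity over the N scored tokens:

```latex
\mathrm{PPL} = \exp\!\left(-\frac{1}{N}\sum_{i=1}^{N}\log p\left(x_i \mid x_{<i}\right)\right)
```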
16 changes: 11 additions & 5 deletions tools/python/model_validation/validation_config.json
@@ -2,23 +2,29 @@
"models": [
{
"name": "Qwen/Qwen2.5-7B-Instruct",
"chat_template": "<|im_start|>\n <|user|> \n {input} <|im_end>\n'<|im_start|>assistant\n"
"chat_template": "<|im_start|>\n <|user|> \n {input} <|im_end>\n'<|im_start|>assistant\n",
"metrics": []
},
{
"name": "meta-llama/Llama-2-7b-chat-hf",
"chat_template": "<s>[INST]<<SYS>>\n{input}<</SYS>>[INST]"
"chat_template": "<s>[INST]<<SYS>>\n{input}<</SYS>>[INST]",
"metrics": []

},
{
"name": "mistralai/Mistral-7B-Instruct-v0.3",
"chat_template": " \"[INST] \" + {input} + \"[/INST]\""
"chat_template": " \"[INST] \" + {input} + \"[/INST]\"",
"metrics": []
},
{
"name": "microsoft/Phi-3.5-mini-instruct",
"chat_template": "<|user|>\n{input} <|end|>\n<|assistant|>"
"chat_template": "<|user|>\n{input} <|end|>\n<|assistant|>",
"metrics": []
},
{
"name": "google/gemma-2-2b-it",
"chat_template": "'<start_of_turn>' + <|user|> '\n' + {input} + '<end_of_turn>\n"
"chat_template": "'<start_of_turn>' + <|user|> '\n' + {input} + '<end_of_turn>\n",
"metrics": []
}
],
"inputs": [
4 changes: 4 additions & 0 deletions tools/python/model_validation/validation_tool.py
@@ -4,6 +4,7 @@
 import json
 import os
 import pandas as pd
+from perplexity_metrics import perplexity_eval

 def create_table(output):
     df = pd.DataFrame(output, columns=['Model Name', 'Validation Completed', 'Exceptions / Failures'])
@@ -113,6 +114,9 @@ def validate_model(args, model_dict, model_dir):
         print(f'Failure after validation model {e}')
         exception = True
         output.append([model_dict["name"], validation_complete, e])
+
+    # function call out here?
+    perplexity_eval(output_path)
 
     if not exception:
         output.append([model_dict["name"], validation_complete, e])
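
One possible way to resolve the inline question, as a hedged sketch (it assumes `output_path` points at the generated model directory and that per-model results should be recorded rather than only printed; the metrics wiring here is hypothetical, not part of the PR):

```python
# Hypothetical placement: evaluate once per model after validation completes,
# and record the score next to the validation outcome instead of only printing it.
try:
    ppl = perplexity_eval(output_path)
    model_dict.setdefault("metrics", []).append({"perplexity": float(ppl)})
except Exception as e:
    output.append([model_dict["name"], validation_complete, f"perplexity eval failed: {e}"])
```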