add phi3
andreea-popescu-reef committed Sep 6, 2024
1 parent f956da5 commit 1b46224
Showing 8 changed files with 148 additions and 83 deletions.
6 changes: 5 additions & 1 deletion .github/workflows/build_push_image.yml
@@ -1,5 +1,10 @@
name: "CD: build & push image"

on:
push:
branches: [build-image]
workflow_dispatch:

env:
PYTHON_DEFAULT_VERSION: "3.12"
TAG_VERSION: "v0-latest"
@@ -28,7 +33,6 @@ jobs:
- name: Docker build and push
run: |
df -h
IMAGE_NAME="${DOCKER_REPO_NAME}:${TAG_VERSION}"
cd src/compute_horde_prompt_gen
5 changes: 4 additions & 1 deletion .github/workflows/smoke_test.yml
@@ -3,6 +3,9 @@ name: Run Smoke Test
on:
push:
branches: [master, main]
pull_request:
branches: [master, main]
workflow_dispatch:

env:
PYTHON_DEFAULT_VERSION: "3.11"
@@ -28,7 +31,7 @@ jobs:
python3 run.py --model_name mock --number_of_batches 5 --number_of_prompts_per_batch 20 --uuids uuid1,uuid2,uuid3,uuid4,uuid5
echo -e "\ngenerated batches:"
ls ./output/
ls output/
echo -e "\nchecking if prompts are generated fine"
for i in $(seq 1 5); do
14 changes: 9 additions & 5 deletions README.md
@@ -7,27 +7,31 @@ The prompt that generates prompts is inspired from [Bittensor Subnet 18 (Cortex.

The generated prompts will be saved in `<output_folder_path>/prompts_<uuid>.txt`, each line of the text file containing a prompt.

Supports the llama3 (`meta-llama/Meta-Llama-3.1-8B-Instruct`) and phi3 (`microsoft/Phi-3.5-mini-instruct`) models.
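
For illustration, a minimal sketch (not part of this commit) of consuming the generated batches, assuming the default `output/` folder and the `prompts_<uuid>.txt` naming described above:

```python
# Count the prompts in every generated batch file (one prompt per line).
from pathlib import Path

for batch_file in sorted(Path("output").glob("prompts_*.txt")):
    prompts = [line for line in batch_file.read_text().splitlines() if line.strip()]
    print(f"{batch_file.name}: {len(prompts)} prompts")
```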

### build image


```bash
# download the model data from huggingface
python3 download_model.py --huggingface_token <API_KEY>

cd src/compute_horde_prompt_gen

# download model data
python3 download_model.py --model_name phi3 --huggingface_token <API_KEY>

# build the image
docker build -t compute-horde-prompt-gen .
```


### run image
```bash
docker run -v ./output/:/app/output/ compute-horde-prompt-gen --number_of_batches 3 --number_of_prompts_per_batch 4 --uuids uuid1,uuid2,uuid3
docker run -v ./output/:/app/output/ compute-horde-prompt-gen --model_name phi3 --number_of_prompts_per_batch 4 --uuids uuid1,uuid2,uuid3
```

### testing
```bash
python3 run.py --mock_model --number_of_batches 3 --number_of_prompts_per_batch 4 --uuids uuid1,uuid2,uuid3
cd src/compute_horde_prompt_gen
python3 run.py --model_name mock --number_of_prompts_per_batch 4 --uuids uuid1,uuid2,uuid3
```

---
30 changes: 22 additions & 8 deletions download_model.py → ...ompute_horde_prompt_gen/download_model.py
@@ -1,9 +1,15 @@
import os
import argparse
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
)

MODEL_PATHS = {
"llama3": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"phi3": "microsoft/Phi-3.5-mini-instruct",
}

if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Save huggingface model")
parser.add_argument(
@@ -15,24 +21,32 @@
parser.add_argument(
"--model_name",
type=str,
default="meta-llama/Meta-Llama-3.1-8B-Instruct",
help="Model name to use",
choices=["llama3", "phi3"],
required=True,
help="Model to use - options are llama3 or phi3",
)
parser.add_argument(
"--model_path",
"--save_path",
type=str,
default="./src/compute_horde_prompt_gen/saved_models/",
default="./saved_models/",
help="Path to save the model and tokenizer to",
)

args = parser.parse_args()
save_path = os.path.join(args.save_path, args.model_name)
model_name = MODEL_PATHS[args.model_name]

print(f"Saving {model_name} model to {save_path}\n")

model = AutoModelForCausalLM.from_pretrained(
args.model_name,
model_name,
# either give token directly or assume logged in with huggingface-cli
token=args.huggingface_token or True,
)
model.save_pretrained(args.model_path)
model.save_pretrained(save_path)

tokenizer = AutoTokenizer.from_pretrained(args.model_name)
tokenizer.save_pretrained(args.model_path)
tokenizer = AutoTokenizer.from_pretrained(
model_name,
token=args.huggingface_token or True,
)
tokenizer.save_pretrained(save_path)
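
As a hedged sketch (not in this commit), the snapshot written by this script can later be loaded fully offline; the path below assumes the default `--save_path` together with the `phi3` choice:

```python
# Load the locally saved snapshot; local_files_only mirrors how model.py
# reads from disk without contacting the Hugging Face Hub.
from transformers import AutoModelForCausalLM, AutoTokenizer

save_path = "./saved_models/phi3"  # assumed: default --save_path + model_name

model = AutoModelForCausalLM.from_pretrained(save_path, local_files_only=True)
tokenizer = AutoTokenizer.from_pretrained(save_path, local_files_only=True)
```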
70 changes: 60 additions & 10 deletions src/compute_horde_prompt_gen/model.py
@@ -1,23 +1,28 @@
import logging

from prompt import PROMPT_ENDING
import io

log = logging.getLogger(__name__)


def strip_input(output: str, ending: str) -> str:
# input prompt is repeated in the output, so we need to remove it
idx = output.find(ending) + len(ending)
return output[idx:].strip()


class MockModel:
def __init__(self):
pass

def generate(self, prompts: list[str], num_return_sequences: int, **_kwargs):
return [1 for _ in range(len(prompts) * num_return_sequences)]

def decode(self, _output):
return f"COPY PASTE INPUT PROMPT {PROMPT_ENDING} Here is the list of prompts:\nHow are you?\nDescribe something\nCount to ten\n"
        # one canned block of prompts per element of the generated output
        content = "Here is the list of prompts:\nHow are you?\nDescribe something\nCount to ten\n"
        return [content for _ in _output]


class GenerativeModel:
def __init__(self, model_path: str, quantize: bool = False):
self.input_prompt_ending = None

import torch
from transformers import (
AutoTokenizer,
Expand Down Expand Up @@ -45,26 +50,71 @@ def __init__(self, model_path: str, quantize: bool = False):
model_path,
local_files_only=True,
)

def tokenize(self, prompts: list[str], role: str) -> str:
# set default padding token
self.tokenizer.pad_token = self.tokenizer.eos_token

role_templates = {
"system": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n{{{{ {} }}}}<|eot_id|>",
"user": "<|start_header_id|>user<|end_header_id|>\n{{{{ {} }}}}<|eot_id|>",
"assistant": "<|start_header_id|>assistant<|end_header_id|>\n{{{{ {} }}}}<|eot_id|>",
"end": "<|start_header_id|>assistant<|end_header_id|>",
}

def tokenize(prompt: str) -> str:
msgs = [
{"role": "system", "content": role},
{"role": "user", "content": prompt},
]
full_prompt = io.StringIO()
for msg in msgs:
full_prompt.write(role_templates[msg["role"]].format(msg["content"]))
full_prompt.write(role_templates["end"])
return full_prompt.getvalue()

inputs = [tokenize(prompt) for prompt in prompts]
inputs = self.tokenizer(inputs, return_tensors="pt", padding=True).to("cuda")
return inputs

def decode(self, output) -> list[str]:
return [
strip_input(
self.tokenizer.decode(x, skip_special_tokens=True),
self.input_prompt_ending,
)
for x in output
]

def generate(
self,
prompts: list[str],
role: str,
num_return_sequences: int,
max_new_tokens: int,
temperature: float,
):
# encode the prompts
inputs = self.tokenizer(prompts, return_tensors="pt", padding=True).to("cuda")
inputs = self.tokenize(prompts, role)

return self.model.generate(
output = self.model.generate(
**inputs,
max_new_tokens=max_new_tokens,
temperature=temperature,
num_return_sequences=num_return_sequences,
do_sample=True, # use sampling-based decoding
)

def decode(self, output):
return self.tokenizer.decode(output, skip_special_tokens=True)
return self.decode(output)


class Phi3(GenerativeModel):
def __init__(self, model_path: str, quantize: bool = False):
super().__init__(model_path, quantize)
self.input_prompt_ending = "assistant<|end_header_id|>"


class Llama3(GenerativeModel):
def __init__(self, model_path: str, quantize: bool = False):
super().__init__(model_path, quantize)
self.input_prompt_ending = " }}assistant"
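
To make the `input_prompt_ending` values above concrete, here is a small hypothetical illustration (not part of the commit) of how `strip_input` drops the echoed input once special tokens have been removed; the decoded string is invented for the example:

```python
def strip_input(output: str, ending: str) -> str:
    # everything up to and including the ending marker is the echoed input
    idx = output.find(ending) + len(ending)
    return output[idx:].strip()

# Invented decoded output: the templated system/user input is echoed first,
# then the Llama3 ending " }}assistant", then the actual completion.
decoded = (
    "system\n{{ You are a prompt engineer... }}"
    "user\n{{ Generate a list of 10 questions... }}assistant\n"
    "How are you?\nDescribe something"
)
print(strip_input(decoded, " }}assistant"))
# prints:
# How are you?
# Describe something
```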
50 changes: 16 additions & 34 deletions src/compute_horde_prompt_gen/prompt.py
@@ -1,56 +1,38 @@
import io
import random
from seeds import THEMES, ABILITIES, FORMATS

PROMPT_ENDING = " }}assistant"


class PromptGeneratingPrompt:
def random_select(self, arr: list[str], num: int = 5) -> str:
random.shuffle(arr)
return ", ".join(arr[:num]) + ", etc"

def generate_prompt(self) -> str:
def generate_prompt(self, short=True) -> str:
themes = self.random_select(THEMES, num=3)

if short:
return (
f"Generate a list of 10 questions or instruct tasks related to the themes of {themes}. "
f"Output each prompt on a new line without any extra commentary or special characters."
)

relevance_level = random.randint(5, 20)
complexity_level = random.randint(5, 20)

themes = self.random_select(THEMES, num=3)
abilities = self.random_select(ABILITIES, num=4)
formats = self.random_select(FORMATS, num=5)

prompt = (
f"Generate a list of 5 complex prompts (questions or instruct tasks) that cover a wide range of skills and knowledge areas related to the themes of {themes}. "
return (
f"Generate a list of 10 complex prompts (questions or instruct tasks) that cover a wide range of skills and knowledge areas related to the themes of {themes}. "
f"Each of these prompts should: "
f"\n- have a complexity level of {complexity_level} out of 20 and a relevance level to the theme of {relevance_level} out of 20"
f"\n- test various cognitive abilities ({abilities}) and require different types of writing formats ({formats})"
f"\n- challenge the model's ability to understand and respond appropriately"
f"\n- varyingly explore the {themes} in a manner that is consistent with their assigned complexity and relevance levels to the theme"
f"\nOutput each prompt on a new line without any extra commentary or special characters."
)
return prompt

def generate_role(self) -> str:
role = "You are a prompt engineer tasked with prompts of varying complexity to test the capabilities of a new language model. For each prompt, consider what aspect of the language model's capabilities it is designed to test and ensure that the set of prompts covers a broad spectrum of potential use cases for the language model. Only output the prompts, one per line without any extra commentary. Do not use any special characters or formatting, numbering or styling in the output."
return role

def tokenize(self, prompt: str, role: str) -> str:
role_templates = {
"system": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n{{{{ {} }}}}<|eot_id|>",
"user": "<|start_header_id|>user<|end_header_id|>\n{{{{ {} }}}}<|eot_id|>",
"assistant": "<|start_header_id|>assistant<|end_header_id|>\n{{{{ {} }}}}<|eot_id|>",
"end": "<|start_header_id|>assistant<|end_header_id|>",
}
msgs = [
{"role": "system", "content": role},
{"role": "user", "content": prompt},
]
full_prompt = io.StringIO()
for msg in msgs:
full_prompt.write(role_templates[msg["role"]].format(msg["content"]))
full_prompt.write(role_templates["end"])
return full_prompt.getvalue()

def generate(self):
prompt = self.generate_prompt()
role = self.generate_role()
return self.tokenize(prompt, role)

def generate_role(self, short=True) -> str:
if short:
return "You are a prompt engineer tasked with prompts of varying complexity to test the capabilities of a new language model."
return "You are a prompt engineer tasked with prompts of varying complexity to test the capabilities of a new language model. For each prompt, consider what aspect of the language model's capabilities it is designed to test and ensure that the set of prompts covers a broad spectrum of potential use cases for the language model. Only output the prompts, one per line without any extra commentary. Do not use any special characters or formatting, numbering or styling in the output."
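
As a rough, hypothetical sketch of how these pieces fit together (the actual wiring lives in `run.py`, which is not shown in this diff), the mock path could be exercised like this:

```python
# Assumed wiring: build meta-prompts, run them through MockModel, decode.
from prompt import PromptGeneratingPrompt
from model import MockModel

generator = PromptGeneratingPrompt()
prompts = [generator.generate_prompt() for _ in range(4)]  # e.g. 4 prompts per batch
role = generator.generate_role()  # unused by MockModel, required by the real models

model = MockModel()
output = model.generate(prompts, num_return_sequences=1)
for block in model.decode(output):
    print(block)  # one canned block of prompts per generated sequence
```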