Commit
#13368: Add Llama3.2-11B text model to CI
mtairum committed Oct 16, 2024
1 parent b34a138 commit b2b522c
Showing 8 changed files with 23 additions and 8 deletions.
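
The seven shell scripts below all receive the same two-part change: a llama11b variable pointing at the Llama3.2-11B-Vision-Instruct weights (only the text model is exercised, per the commit title), and the model-directory loop extended to cover it. A minimal sketch of the shared shape, with the per-script pytest target left as a placeholder since it varies file to file:

    # Llama3.2-11B
    llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

    for llama_dir in "$llama8b" "$llama1b" "$llama3b" "$llama11b"; do
        # <pytest target and extra env vars vary per script>
        LLAMA_DIR=$llama_dir pytest -n auto <per-script test path> ; fail+=$?
        echo "LOG_METAL: Llama3 tests for $llama_dir completed"
    done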
models/demos/llama3/tests/test_llama_ci_dispatch.py (3 changes: 2 additions & 1 deletion)
@@ -14,8 +14,9 @@ def test_llama_ci_dispatch():
     dir_1b = "/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/"
     dir_3b = "/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/"
     dir_8b = "/mnt/MLPerf/tt_dnn-models/llama/Meta-Llama-3.1-8B-Instruct/"
+    dir_11b = "/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/"

-    # for dir_path in [dir_1b, dir_3b, dir_8b]:
+    # for dir_path in [dir_1b, dir_3b, dir_8b, dir_11b]:
     for dir_path in [dir_1b]:
         logger.info(f"Running fast dispatch tests for {dir_path}")
         os.environ["LLAMA_DIR"] = dir_path
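
Model selection here works the same way as in the shell scripts below: LLAMA_DIR is pointed at a weights directory before the model code runs. The 11B path is now wired up, but the active loop still covers only dir_1b; the updated commented-out line records the intended full set. As a sketch, the quick single-model check could be reproduced by hand like this (invocation lifted from run_python_model_tests.sh below; assumes the /mnt/MLPerf weights mount and a Wormhole card are available):

    LLAMA_DIR=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/ \
    WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml \
        pytest -n auto models/demos/llama3/tests/test_llama_model.py -k "quick"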
tests/scripts/run_performance.sh (4 changes: 3 additions & 1 deletion)
@@ -55,9 +55,11 @@ run_perf_models_llm_javelin() {
 llama1b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/
 # Llama3.2-3B
 llama3b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/
+# Llama3.2-11B
+llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

 # Run all Llama3 tests for 8B, 1B, and 3B weights
-for llama_dir in "$llama8b" "$llama1b" "$llama3b"; do
+for llama_dir in "$llama8b" "$llama1b" "$llama3b" "$llama11b"; do
     LLAMA_DIR=$llama_dir pytest -n auto models/demos/llama3/tests/test_llama_perf.py -m $test_marker
     echo "LOG_METAL: Llama3 tests for $llama_dir completed"
 done
tests/scripts/run_python_model_tests.sh (4 changes: 3 additions & 1 deletion)
@@ -48,9 +48,11 @@ run_python_model_tests_wormhole_b0() {
 llama1b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/
 # Llama3.2-3B
 llama3b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/
+# Llama3.2-11B
+llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

 # Run all Llama3 tests for 8B, 1B, and 3B weights - dummy weights with tight PCC check
-for llama_dir in "$llama8b" "$llama1b" "$llama3b"; do
+for llama_dir in "$llama8b" "$llama1b" "$llama3b" "$llama11b"; do
     LLAMA_DIR=$llama_dir WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/demos/llama3/tests/test_llama_model.py -k "quick" ; fail+=$?
     echo "LOG_METAL: Llama3 tests for $llama_dir completed"
 done
tests/scripts/single_card/run_single_card_demo_tests.sh (4 changes: 3 additions & 1 deletion)
@@ -14,9 +14,11 @@ run_common_func_tests() {
 llama1b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/
 # Llama3.2-3B
 llama3b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/
+# Llama3.2-11B
+llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

 # Run all Llama3 tests for 8B, 1B, and 3B weights
-for llama_dir in "$llama8b" "$llama1b" "$llama3b"; do
+for llama_dir in "$llama8b" "$llama1b" "$llama3b" "$llama11b"; do
     LLAMA_DIR=$llama_dir WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml pytest -n auto models/demos/llama3/demo/demo.py --timeout 600; fail+=$?
     echo "LOG_METAL: Llama3 tests for $llama_dir completed"
 done
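
To try just the new 11B demo outside CI, the loop body above can be lifted with the variable substituted (a sketch; assumes the same weights mount the scripts use):

    LLAMA_DIR=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/ \
    WH_ARCH_YAML=wormhole_b0_80_arch_eth_dispatch.yaml \
        pytest -n auto models/demos/llama3/demo/demo.py --timeout 600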
tests/scripts/t3000/run_t3000_demo_tests.sh (4 changes: 3 additions & 1 deletion)
@@ -57,9 +57,11 @@ run_t3000_llama3_tests() {
 llama1b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/
 # Llama3.2-3B
 llama3b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/
+# Llama3.2-11B
+llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

 # Run all Llama3 tests for 8B, 1B, and 3B weights
-for llama_dir in "$llama8b" "$llama1b" "$llama3b"; do
+for llama_dir in "$llama8b" "$llama1b" "$llama3b" "$llama11b"; do
     LLAMA_DIR=$llama_dir WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/demo/demo.py --timeout 600; fail+=$?
     echo "LOG_METAL: Llama3 tests for $llama_dir completed"
 done
tests/scripts/t3000/run_t3000_frequent_tests.sh (4 changes: 3 additions & 1 deletion)
@@ -56,9 +56,11 @@ run_t3000_llama3_tests() {
 llama1b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/
 # Llama3.2-3B
 llama3b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/
+# Llama3.2-11B
+llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

 # Run all Llama3 tests for 8B, 1B, and 3B weights
-for llama_dir in "$llama8b" "$llama1b" "$llama3b"; do
+for llama_dir in "$llama8b" "$llama1b" "$llama3b" "$llama11b"; do
     LLAMA_DIR=$llama_dir WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/test_llama_model.py -k full ; fail+=$?
     LLAMA_DIR=$llama_dir WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/test_llama_model_prefill.py ; fail+=$?
     echo "LOG_METAL: Llama3 tests for $llama_dir completed"
tests/scripts/t3000/run_t3000_model_perf_tests.sh (4 changes: 3 additions & 1 deletion)
@@ -69,9 +69,11 @@ run_t3000_llama3_tests() {
 llama1b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/
 # Llama3.2-3B
 llama3b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/
+# Llama3.2-11B
+llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

 # Run all Llama3 tests for 8B, 1B, and 3B weights
-for llama_dir in "$llama8b" "$llama1b" "$llama3b"; do
+for llama_dir in "$llama8b" "$llama1b" "$llama3b" "$llama11b"; do
     LLAMA_DIR=$llama_dir WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/test_llama_perf.py ; fail+=$?
     echo "LOG_METAL: Llama3 tests for $llama_dir completed"
 done
tests/scripts/t3000/run_t3000_unit_tests.sh (4 changes: 3 additions & 1 deletion)
@@ -102,9 +102,11 @@ run_t3000_llama3_tests() {
 llama1b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-1B-Instruct/
 # Llama3.2-3B
 llama3b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-3B-Instruct/
+# Llama3.2-11B
+llama11b=/mnt/MLPerf/tt_dnn-models/llama/Llama3.2-11B-Vision-Instruct/

 # Run all Llama3 tests for 8B, 1B, and 3B weights
-for llama_dir in "$llama8b" "$llama1b" "$llama3b"; do
+for llama_dir in "$llama8b" "$llama1b" "$llama3b" "$llama11b"; do
     LLAMA_DIR=$llama_dir WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/test_llama_attention.py ; fail+=$?
     LLAMA_DIR=$llama_dir WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/test_llama_attention_prefill.py ; fail+=$?
     LLAMA_DIR=$llama_dir WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/test_llama_embedding.py ; fail+=$?
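
Note the fail+=$? idiom in these loops: each pytest exit status is appended to fail instead of aborting the script, so every model directory runs even when an earlier one fails. A minimal sketch of the idiom; the enclosing setup and the final status check are assumptions, since they sit outside the hunks shown in this diff:

    fail=0
    for llama_dir in "$llama8b" "$llama1b" "$llama3b" "$llama11b"; do
        # Each pytest exit status (0-5) is string-appended to fail, so the loop
        # keeps going after a failure and the final check catches any nonzero code.
        LLAMA_DIR=$llama_dir WH_ARCH_YAML=$wh_arch_yaml pytest -n auto models/demos/llama3/tests/test_llama_embedding.py ; fail+=$?
    done
    if [[ $fail -ne 0 ]]; then
        echo "LOG_METAL: run_t3000_llama3_tests failed"
        exit 1
    fi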
