Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Deploy divisions and logics results for single query #104

Merged
merged 9 commits into from
Jul 10, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 11 additions & 2 deletions .github/workflows/deploy-website.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -51,12 +51,21 @@ jobs:
key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
- name: Set up the environment
uses: ./.github/actions/setup-poetry-env
#For smtcomp data cache
- uses: actions/cache@v3
id: data-cache
with:
path: data/*.feather
key: data-cache-${{ hashFiles('data/*.json.gz') }}
- name: Cache generation if needed
if: steps.data-cache.outputs.cache-hit != 'true'
run: |
poetry run make cache
# Generate files using smtcomp tool
- name: Files generation
run: |
pip install json-schema-for-humans
poetry run make submission-doc
poetry run make participant-data
poetry run make submission-doc generation
###
- name: Setup Pages
id: pages
Expand Down
15 changes: 13 additions & 2 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,19 @@ jobs:
- name: Run checks
run: make check

- name: Run tests
run: make tests
#For smtcomp data cache (TODO unduplicate from deploy-website)
- uses: actions/cache@v3
id: data-cache
with:
path: data/*.feather
key: data-cache-${{ hashFiles('data/*.json.gz') }}
- name: Cache generation if needed
if: steps.data-cache.outputs.cache-hit != 'true'
run: |
poetry run make cache

- name: Run test
run: make test

tox:
runs-on: ubuntu-latest
Expand Down
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -172,3 +172,4 @@ schema_doc.min.js
*.feather
tmp/
/web/public/
/web/content/results/*.md
12 changes: 10 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ test: generation ## Test the code with pytest
@echo "🚀 Testing code: Running pytest"
@poetry run pytest

generation: submission-generation participant-data ## Files generation for the website
generation: submission-generation participant-data results-generation ## Files generation for the website

.PHONY: build
build: clean-build ## Build wheel file using poetry
Expand All @@ -45,7 +45,7 @@ GENERATED_SCHEMA_FILE=web/content/solver_submission/schema.json
GENERATED_SCHEMA_HTML=web/content/solver_submission/schema.html
PARTICIPANT_DATA_FILE=web/data/participants.json

.PHONY: submission-doc submission-generation participant-data
.PHONY: submission-doc submission-generation participant-data results-generation cache
submission-generation:
@echo "🚀 Generating schema to $(GENERATED_SCHEMA_FILE)"
@poetry run smtcomp dump-json-schema $(GENERATED_SCHEMA_FILE)
Expand All @@ -59,5 +59,13 @@ participant-data:
@echo "🚀 Generating participant data to $(PARTICIPANT_DATA_FILE)"
@poetry run smtcomp show-json submissions/*.json $(PARTICIPANT_DATA_FILE)

results-generation:
@echo "🚀 Generating results to web/content/results"
@poetry run smtcomp export-results-pages data

cache:
@echo "🚀 Generating cache"
@poetry run smtcomp create-cache data

hugo-server:
(cd web; hugo server)
15 changes: 15 additions & 0 deletions smtcomp/defs.py
Original file line number Diff line number Diff line change
Expand Up @@ -1384,6 +1384,7 @@ class Results(BaseModel):

## Parameters that can change each year
class Config:
__next_id__: ClassVar[int] = 0
current_year = 2024
oldest_previous_results = 2018
timelimit_s = 60 * 20
Expand Down Expand Up @@ -1427,10 +1428,20 @@ class Config:
]

def __init__(self, data: Path | None) -> None:
    # Assign a unique, monotonically increasing id to each Config
    # instance; __eq__/__hash__ below rely on it, making configs
    # compare by creation identity.
    self.id = self.__class__.__next_id__
    self.__class__.__next_id__ += 1
    # `data` is either None (no data directory given) or a directory
    # that must literally be named "data" — cheap guard against
    # passing an unrelated path.
    if data is not None and data.name != "data":
        raise ValueError("Consistency check, data directory must be named 'data'")
    self._data = data

def __eq__(self, other: Any) -> bool:
    # Configs compare by their unique per-instance id (see __init__),
    # so two distinct Config objects are never equal even if built
    # from the same data path. Non-Config operands are never equal.
    if isinstance(other, Config):
        return self.id == other.id
    return False

def __hash__(self: Config) -> int:
    # Hash consistent with __eq__: the unique per-instance id.
    return self.id

@functools.cached_property
def data(self) -> Path:
if self._data is None:
Expand Down Expand Up @@ -1478,6 +1489,10 @@ def submissions(self) -> list[Submission]:
Submission.model_validate_json(Path(file).read_text()) for file in self.data.glob("../submissions/*.json")
]

@functools.cached_property
def web_results(self) -> Path:
    # Destination directory for the generated results pages, located
    # relative to the data directory (data/../web/content/results).
    return self.data / ".." / "web" / "content" / "results"

@functools.cached_property
def seed(self) -> int:
unknown_seed = 0
Expand Down
219 changes: 219 additions & 0 deletions smtcomp/generate_website_page.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,219 @@
import functools, itertools
from typing import Set, Dict, Optional, cast, List, DefaultDict
from pathlib import Path, PurePath
from smtcomp import defs
from rich import progress
from rich import print
from pydantic import BaseModel
import polars as pl
import smtcomp.scoring
from smtcomp.utils import *
import smtcomp.results

# Warning: Hugo lowercase all dict keys


class PodiumStep(BaseModel):
    # One row of a ranking table: the aggregated scores of a single
    # solver within one division/logic. Serialized to JSON for the
    # website; note the module warning that Hugo lowercases dict keys.
    name: str
    competing: str  # yes or no
    errorScore: int  # negated error_score (larger is worse, stored positive)
    correctScore: int
    CPUScore: float
    WallScore: float
    solved: int
    solved_sat: int
    solved_unsat: int
    unsolved: int
    abstained: int  # benchmarks of the group the solver did not run on
    timeout: int
    memout: int


class PodiumDivision(BaseModel):
    # Page data for one generated results page (a division or a single
    # logic). Dumped as JSON into web/content/results by export_results.
    resultdate: str
    year: int
    divisions: str  # divisions_2023
    participants: str  # participants_2023
    disagreements: str  # disagreements_2023
    division: str  # Arith
    track: str  # track_single_query
    n_benchmarks: int
    time_limit: int  # from config.timelimit_s
    mem_limit: int  # from config.memlimit_M
    logics: dict[str, int]  # benchmark count per logic; empty for logic pages
    winner_seq: str  # "-" when there is no ranking for the kind
    winner_par: str
    winner_sat: str
    winner_unsat: str
    winner_24s: str

    # Full ranking tables, one list per scoring kind, best solver first.
    sequential: list[PodiumStep]
    parallel: list[PodiumStep]
    sat: list[PodiumStep]
    unsat: list[PodiumStep]
    twentyfour: list[PodiumStep]

    layout: str = "result"  # page layout name used by the website generator


def podium_steps(podium: List[dict[str, Any]] | None) -> List[PodiumStep]:
    """Convert aggregated per-solver rows into PodiumStep table entries.

    ``podium`` holds the per-solver dicts produced by
    ``sq_generate_datas`` for one scoring kind; ``None`` (no data for
    this kind) yields an empty list. Order is preserved.
    """
    if podium is None:
        return []
    steps: List[PodiumStep] = []
    for row in podium:
        steps.append(
            PodiumStep(
                name=row["solver"],
                competing="yes",  # TODO
                # error_score is stored negated; flip the sign for display.
                errorScore=-row["error_score"],
                correctScore=row["correctly_solved_score"],
                CPUScore=row["cpu_time_score"],
                WallScore=row["wallclock_time_score"],
                solved=row["solved"],
                solved_sat=row["solved_sat"],
                solved_unsat=row["solved_unsat"],
                unsolved=row["unsolved"],
                abstained=row["abstained"],
                timeout=row["timeout"],
                memout=row["memout"],
            )
        )
    return steps


def make_podium(config: defs.Config, d: dict[str, Any], for_division: bool) -> PodiumDivision:
    """Assemble the PodiumDivision page data for one division or logic.

    ``d`` is one collected row from ``sq_generate_datas``;
    ``for_division`` tells whether the row describes a whole division
    (with per-logic benchmark counts) or a single logic.
    """

    def first_solver(ranking: List[dict[str, str]] | None) -> str:
        # TODO select only participating
        return "-" if ranking is None else ranking[0]["solver"]

    if for_division:
        name = defs.Division.name_of_int(d["division"])
        logics = {defs.Logic.name_of_int(row["logic"]): row["n"] for row in d["logics"]}
    else:
        name = defs.Logic.name_of_int(d["logic"])
        logics = {}

    kinds = smtcomp.scoring.Kind
    return PodiumDivision(
        resultdate="2024-07-08",
        year=config.current_year,
        divisions=f"divisions_{config.current_year}",
        participants=f"participants_{config.current_year}",
        disagreements=f"disagreements_{config.current_year}",
        division=name,
        track="track_single_query",
        n_benchmarks=d["total"],
        time_limit=config.timelimit_s,
        mem_limit=config.memlimit_M,
        logics=logics,
        winner_seq=first_solver(d[kinds.seq.name]),
        winner_par=first_solver(d[kinds.par.name]),
        winner_sat=first_solver(d[kinds.sat.name]),
        winner_unsat=first_solver(d[kinds.unsat.name]),
        winner_24s=first_solver(d[kinds.twentyfour.name]),
        sequential=podium_steps(d[kinds.seq.name]),
        parallel=podium_steps(d[kinds.par.name]),
        sat=podium_steps(d[kinds.sat.name]),
        unsat=podium_steps(d[kinds.unsat.name]),
        twentyfour=podium_steps(d[kinds.twentyfour.name]),
    )


def sq_generate_datas(
    config: defs.Config, selection: pl.LazyFrame, results: pl.LazyFrame, for_division: bool
) -> dict[str, PodiumDivision]:
    """
    Aggregate single-query results into page data, keyed by division or
    logic name.

    ``results`` must still carry its "disagreements" column: disagreeing
    benchmarks are counted per group, then excluded from scoring.
    With ``for_division`` True the grouping key is the division,
    otherwise the individual logic.
    """
    assert "disagreements" in results.columns

    # Pick the grouping column and the int -> display-name conversion.
    if for_division:
        group_by = "division"
        name_of_int = defs.Division.name_of_int
    else:
        group_by = "logic"
        name_of_int = defs.Logic.name_of_int

    # Keep only the single-query track; the column is constant afterwards.
    results = results.filter(track=int(defs.Track.SingleQuery)).drop("track")

    selection = selection.filter(selected=True)

    # Number of selected benchmarks per group ("total" column).
    len_by_division = selection.group_by(group_by).agg(total=pl.len())

    def info_for_podium_step(kind: smtcomp.scoring.Kind, config: defs.Config, results: pl.LazyFrame) -> pl.LazyFrame:
        # Build, for one scoring kind, a frame with one row per group whose
        # single aggregated column (named after the kind) is the list of
        # per-solver score structs sorted best-first.
        results = smtcomp.scoring.filter_for(kind, config, results)
        return (
            intersect(results, len_by_division, on=[group_by])
            .group_by(group_by, "solver")
            .agg(
                pl.sum("error_score"),
                pl.sum("correctly_solved_score"),
                pl.sum("cpu_time_score"),
                pl.sum("wallclock_time_score"),
                solved=(smtcomp.scoring.known_answer).sum(),
                solved_sat=(smtcomp.scoring.sat_answer).sum(),
                solved_unsat=(smtcomp.scoring.unsat_answer).sum(),
                unsolved=(smtcomp.scoring.unknown_answer).sum(),
                timeout=(smtcomp.scoring.timeout_answer).sum(),
                memout=(smtcomp.scoring.memout_answer).sum(),
                # Benchmarks of the group the solver produced no result for.
                abstained=pl.col("total").first() - pl.len(),
            )
            # Sort solvers best-first within each group, then collapse each
            # group into a list of structs while keeping that order.
            .sort([group_by] + smtcomp.scoring.scores, descending=True)
            .group_by(group_by, maintain_order=True)
            .agg(
                pl.struct(
                    "solver",
                    "error_score",
                    "correctly_solved_score",
                    "cpu_time_score",
                    "wallclock_time_score",
                    "solved",
                    "solved_sat",
                    "solved_unsat",
                    "unsolved",
                    "timeout",
                    "memout",
                    "abstained",
                ).alias(kind.name)
            )
        )

    # For division pages, also collect each member logic's benchmark count.
    if for_division:
        lf_logics = [
            selection.group_by("division", "logic")
            .agg(n=pl.len())
            .group_by("division")
            .agg(logics=pl.struct("logic", "n"))
        ]
    else:
        lf_logics = []

    # Count disagreeing benchmarks per group, then drop them from scoring.
    lf_info2 = results.group_by(group_by).agg(disagreements=(pl.col("disagreements") == True).sum())

    results = results.filter(disagreements=False).drop("disagreements")

    l = (
        [len_by_division, lf_info2]
        + lf_logics
        + [info_for_podium_step(kind, config, results) for kind in smtcomp.scoring.Kind]
    )

    # Join all per-group frames on the grouping key: one row per group,
    # each join validated to be one-to-one.
    r = functools.reduce(lambda x, y: x.join(y, validate="1:1", on=[group_by], how="left"), l)

    df = r.collect()

    return dict((name_of_int(d[group_by]), make_podium(config, d, for_division)) for d in df.to_dicts())


def export_results(config: defs.Config, selection: pl.LazyFrame, results: pl.LazyFrame) -> None:
    """Write one single-query results page per division and per logic.

    Pages are written to ``config.web_results`` as
    ``<name>-single-query.md``, each containing the JSON dump of a
    PodiumDivision.
    """
    out_dir = config.web_results
    out_dir.mkdir(parents=True, exist_ok=True)

    # Materialize the results once so both passes reuse the computation.
    results = results.collect().lazy()

    for for_division in (True, False):
        pages = sq_generate_datas(config, selection, results, for_division)
        for name, page in pages.items():
            target = out_dir / f"{name.lower()}-single-query.md"
            target.write_text(page.model_dump_json(indent=1))
Loading
Loading