dags_airflow_ale_kopfocal.py
import airflow.utils.dates
from airflow import DAG
from airflow.operators.dummy import DummyOperator
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator

dag = DAG(
    dag_id="kubeoptest_focal",
    start_date=airflow.utils.dates.days_ago(3),
    schedule_interval=None,
    description="Test for kubernetes operator",
)
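# Note (added for clarity): schedule_interval=None means this DAG never runs
# on a schedule and is only started by a manual trigger, e.g.
# `airflow dags trigger kubeoptest_focal`.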
## DAG CALLABLES

## DAG DEFS
Ini = DummyOperator(task_id="start_training", dag=dag)
Op = KubernetesPodOperator(
    namespace="glitchflow",
    # the Docker image to launch
    image="debian",
    image_pull_policy="Always",
    cmds=["bash", "-cx"],
    # with "bash -cx" the whole script must be passed as a single argument;
    # the script also writes /airflow/xcom/return.json, which the xcom
    # sidecar expects when do_xcom_push=True
    arguments=[
        "echo 'Hello World'"
        " && mkdir -p /airflow/xcom"
        " && echo '{\"msg\": \"Hello World\"}' > /airflow/xcom/return.json"
    ],
    # Pod configuration
    # name the Pod (must be a valid DNS-1123 name, so no underscores)
    name="airflow-op",
    # launch the Pod on the same cluster as Airflow is running on
    in_cluster=True,
    # attach labels to the Pod, can be used for grouping
    labels={"app": "preq", "backend": "airflow"},
    # reattach to the running Pod instead of creating a new one on worker failure
    reattach_on_restart=True,
    # delete the Pod after the task is finished
    is_delete_operator_pod=True,
    # stream the container's stdout as task logs
    get_logs=True,
    # log cluster events in case of Pod failure
    log_events_on_failure=True,
    # push the contents of /airflow/xcom/return.json as an XCom
    do_xcom_push=True,
    # unique id of the task within the DAG
    task_id="kubeop",
    dag=dag,
    # env_vars={"NAME_TO_GREET": f"{name}"},
)
Ini >> Op
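
# A minimal sketch (an addition, not part of the original DAG): because
# do_xcom_push=True, a downstream task can read what the pod wrote to
# /airflow/xcom/return.json via xcom_pull. The "report" task below is
# hypothetical and only illustrates the pattern.
from airflow.operators.python import PythonOperator


def print_pod_result(ti):
    # pull the JSON payload pushed by the "kubeop" task's xcom sidecar
    print(ti.xcom_pull(task_ids="kubeop"))


Report = PythonOperator(
    task_id="report",
    python_callable=print_pod_result,
    dag=dag,
)
Op >> Report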