# customvalues.template.yaml
videoSource:
logLevel: DEBUG # Options: ERROR, WARNING, INFO, DEBUG
configs:
- id: video1 # The name of the video stream. Will be used as Redis stream key and as camera_id in database output.
uri: 'uri' # Where to retrieve the video from. Must be in a format that OpenCV VideoCapture understands; an RTSP stream makes the most sense (see the commented example below).
max_fps: 5 # Effectively sets a lower bound on the time between two frames. 0 means off. Integer fractions of the original frame rate make the most sense (e.g. 15, 10, 5 for a 30fps source).
scale_width: 0 # If > 0, scale the image to the given width (preserving aspect ratio)
jpeg_quality: 80 # JPEG quality 0 - 100 (100 is best quality and largest size). Reasonable starting points by image height: 2160 = 80, 1080 = 90, <720 = 95
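# A purely illustrative second camera entry (hypothetical values, not part of the template) might look like this:
#   - id: video2
#     uri: 'rtsp://user:password@192.0.2.10:554/stream1'  # hypothetical RTSP endpoint
#     max_fps: 5
#     scale_width: 0
#     jpeg_quality: 90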
objectDetector:
config:
model:
size: "n" # Which Ultralytics model to use. Options: n (nano), s (small), m (medium), l (large), x (xlarge)
device: cpu # Options: cpu, cuda (uses gpu; needs proper setup; see README)
inferenceSize: [ 640, 640 ] # What resolution the image will be downscaled to before object-detection inference (should be square and a multiple of 32)
classes: [ 2 ] # Which object classes to detect (COCO class ids; see the note after this section)
logLevel: DEBUG # Options: ERROR, WARNING, INFO, DEBUG
redis:
streamIds:
- video1 # Which video streams to detect objects on. Usually all video-source stream ids.
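# Note on class ids (standard 80-class COCO list as used by the Ultralytics models):
#   0 = person, 1 = bicycle, 2 = car, 3 = motorcycle, 5 = bus, 7 = truck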
objectTracker:
logLevel: DEBUG # Options: ERROR, WARNING, INFO, DEBUG
needsGpu: False # Whether the algorithm needs gpu support (needs proper setup; see README)
streamIds: # Which video streams to track objects on (the same as the object-detector stream ids above)
- video1
config: # Tracker-specific config (see the object-tracker repo / BoxMOT for details, and the parameter notes after this block)
tracker_algorithm: OCSORT
tracker_config:
det_thresh: 0.2
max_age: 30
min_hits: 3
asso_threshold: 0.3
delta_t: 3
asso_func: 'iou'
inertia: 0.2
use_byte: False
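# Parameter notes (common OC-SORT meanings; check the object-tracker repo / BoxMOT docs for the authoritative definitions):
#   det_thresh     - detection confidence threshold used by the tracker
#   max_age        - frames a track is kept alive without a matching detection
#   min_hits       - matched detections required before a track is confirmed
#   asso_threshold - minimum association score for matching detections to tracks
#   asso_func      - association metric (here IoU)
#   inertia        - weight of velocity-direction consistency in the association cost
#   use_byte       - whether to additionally associate low-confidence detections (BYTE-style)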
geoMapper:
enabled: false # Whether the geomapper should be deployed (by default it reads from "objecttracker:*" and writes to "geomapper:*")
config:
cameras: # Parameters have to be specified per camera (running the geomapper without at least roughly correct parameters is pointless)
- stream_id: video1 # This must match one of the existing camera streams
passthrough: false # Whether the stream should be passed through without geo mapping (if true, all other parameters are ignored)
focallength_mm: 20 # Focal length + sensor size and view_*_deg + image dimensions are equivalent alternatives; sensor size is harder to obtain and is therefore only the second choice (see the relation noted after this camera entry).
sensor_height_mm: 16 # See above
sensor_width_mm: 9 # See above
view_x_deg: 90
view_y_deg: 55
image_width_px: 1920
image_height_px: 1080
elevation_m: 10
tilt_deg: 45
pos_lat: 52
pos_lon: 10
heading_deg: 90
abc_distortion_a: 0
abc_distortion_b: 0
abc_distortion_c: 0
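# Standard pinhole-camera relation (general optics, not specific to this repo) linking the two parameter sets:
#   view_x_deg = 2 * atan(sensor_width_mm  / (2 * focallength_mm)) * 180 / pi
#   view_y_deg = 2 * atan(sensor_height_mm / (2 * focallength_mm)) * 180 / pi
# e.g. (illustrative numbers) focallength_mm = 20 with sensor_width_mm = 18 gives view_x_deg ≈ 48.5.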
databaseWriter:
enabled: true # Whether the database-writer should be deployed
config:
redisStreamIds:
- video1 # Which video streams to read (and extract detection and tracking data from)
db:
jdbcUrl: jdbc:postgresql://host:port/ # JDBC URL. Must be a PostgreSQL database; it does not have to be TimescaleDB, although that makes the most sense (see the commented example at the end of this file).
schema: schema
username: username
password: password
hypertable: tablename # Table name. Does not have to be a TimescaleDB hypertable.
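# A purely illustrative filled-in db block (hypothetical host, schema, credentials and table name):
#   db:
#     jdbcUrl: jdbc:postgresql://timescaledb.example.internal:5432/sae
#     schema: public
#     username: sae
#     password: changeme
#     hypertable: detections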