From f17db335769796334e6430597c0906e9adb4cdc8 Mon Sep 17 00:00:00 2001
From: Dazz993
Date: Sun, 8 Mar 2020 11:08:46 +0800
Subject: [PATCH 1/3] Update __init__.py & Freihand.py & Freihand_det.py for Freihand dataset

---
 alphapose/datasets/Freihand.py     | 284 +++++++++++++++++++++++++++++
 alphapose/datasets/Freihand_det.py | 109 ++++++++++++
 alphapose/datasets/__init__.py     |   4 +++-
 3 files changed, 396 insertions(+), 1 deletion(-)
 create mode 100644 alphapose/datasets/Freihand.py
 create mode 100644 alphapose/datasets/Freihand_det.py

diff --git a/alphapose/datasets/Freihand.py b/alphapose/datasets/Freihand.py
new file mode 100644
index 00000000..2c28d110
--- /dev/null
+++ b/alphapose/datasets/Freihand.py
@@ -0,0 +1,284 @@
+# -----------------------------------------------------
+# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
+# -----------------------------------------------------
+
+import copy
+import os
+import pickle as pk
+
+from abc import abstractmethod, abstractproperty
+
+import cv2
+import numpy as np
+import torch.utils.data as data
+from pycocotools.coco import COCO
+
+from alphapose.models.builder import DATASET
+from alphapose.utils.bbox import bbox_clip_xyxy, bbox_xywh_to_xyxy
+from alphapose.utils.presets import SimpleTransform
+
+
+class CustomDataset(data.Dataset):
+    """Custom dataset.
+    Annotation file must be in `coco` format.
+
+    Parameters
+    ----------
+    train: bool, default is True
+        If true, will set as training mode.
+    dpg: bool, default is False
+        If true, will activate `dpg` for data augmentation.
+    skip_empty: bool, default is True
+        Whether to skip an entire image if no valid label is found.
+    cfg: dict, dataset configuration.
+    """
+
+    CLASSES = ['hand']
+    EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
+    num_joints = 21
+
+    def __init__(self,
+                 train=True,
+                 dpg=False,
+                 skip_empty=True,
+                 lazy_import=False,
+                 **cfg):
+
+        self._cfg = cfg
+        self._preset_cfg = cfg['PRESET']
+        self._root = cfg['ROOT']
+        self._img_prefix = cfg['IMG_PREFIX']
+        self._ann_file = os.path.join(self._root, cfg['ANN'])
+
+        self._lazy_import = lazy_import
+        self._skip_empty = skip_empty
+        self._train = train
+        self._dpg = dpg
+
+        if 'AUG' in cfg.keys():
+            self._scale_factor = cfg['AUG']['SCALE_FACTOR']
+            self._rot = cfg['AUG']['ROT_FACTOR']
+            self.num_joints_half_body = cfg['AUG']['NUM_JOINTS_HALF_BODY']
+            self.prob_half_body = cfg['AUG']['PROB_HALF_BODY']
+        else:
+            self._scale_factor = 0
+            self._rot = 0
+            self.num_joints_half_body = -1
+            self.prob_half_body = -1
+
+        self._input_size = self._preset_cfg['IMAGE_SIZE']
+        self._output_size = self._preset_cfg['HEATMAP_SIZE']
+
+        self._sigma = self._preset_cfg['SIGMA']
+
+        self._check_centers = False
+
+        self.num_class = len(self.CLASSES)
+
+        # body-convention ids kept for the half-body aug interface; unused for 21-keypoint hands
+        self.upper_body_ids = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+        self.lower_body_ids = (11, 12, 13, 14, 15, 16)
+
+        if self._preset_cfg['TYPE'] == 'simple':
+            self.transformation = SimpleTransform(
+                self, scale_factor=self._scale_factor,
+                input_size=self._input_size,
+                output_size=self._output_size,
+                rot=self._rot, sigma=self._sigma,
+                train=self._train, add_dpg=self._dpg)
+        else:
+            raise NotImplementedError
+
+        self._items, self._labels = self._lazy_load_json()
+
+    def __getitem__(self, idx):
+        # get image id
+        img_path = self._items[idx]
+        img_id = int(os.path.splitext(os.path.basename(img_path))[0])
+
+        # load ground truth, including bbox, keypoints, image size
+        label = copy.deepcopy(self._labels[idx])
+        # scipy.misc.imread was removed in SciPy 1.2; read with OpenCV and convert BGR to RGB
+        img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
+
+        # transform ground truth into training label and apply data augmentation
+        img, label, label_mask, bbox = self.transformation(img, label)
+        return img, label, label_mask, img_id, bbox
+
+    def __len__(self):
+        return len(self._items)
+
+    def _lazy_load_ann_file(self):
+        if os.path.exists(self._ann_file + '.pkl') and self._lazy_import:
+            print('Lazy load json...')
+            with open(self._ann_file + '.pkl', 'rb') as fid:
+                return pk.load(fid)
+        else:
+            _database = COCO(self._ann_file)
+            if os.access(self._ann_file + '.pkl', os.W_OK):
+                with open(self._ann_file + '.pkl', 'wb') as fid:
+                    pk.dump(_database, fid, pk.HIGHEST_PROTOCOL)
+            return _database
+
+    def _lazy_load_json(self):
+        if os.path.exists(self._ann_file + '_annot_keypoint.pkl') and self._lazy_import:
+            print('Lazy load annot...')
+            with open(self._ann_file + '_annot_keypoint.pkl', 'rb') as fid:
+                items, labels = pk.load(fid)
+        else:
+            items, labels = self._load_jsons()
+            if os.access(self._ann_file + '_annot_keypoint.pkl', os.W_OK):
+                with open(self._ann_file + '_annot_keypoint.pkl', 'wb') as fid:
+                    pk.dump((items, labels), fid, pk.HIGHEST_PROTOCOL)
+
+        return items, labels
+
+    @abstractmethod
+    def _load_jsons(self):
+        pass
+
+    @abstractproperty
+    def CLASSES(self):
+        return None
+
+    @abstractproperty
+    def num_joints(self):
+        return None
+
+    @abstractproperty
+    def joint_pairs(self):
+        """Joint pairs that define which joints are swapped
+        when the image is flipped horizontally."""
+        return None
+
+
+@DATASET.register_module
+class Freihand(CustomDataset):
+    """ Freihand dataset.
+
+    Parameters
+    ----------
+    train: bool, default is True
+        If true, will set as training mode.
+    skip_empty: bool, default is True
+        Whether to skip an entire image if no valid label is found. Use `False` if this
+        dataset is for validation, to avoid COCO metric errors.
+    dpg: bool, default is False
+        If true, will activate `dpg` for data augmentation.
+    """
+    CLASSES = ['hand']
+    EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
+    num_joints = 21
+
+    @property
+    def joint_pairs(self):
+        """Joint pairs that define which joints are swapped
+        when the image is flipped horizontally."""
+        # A single hand has no symmetric left/right keypoints to swap on flip.
+        return []
+
+    def _load_jsons(self):
+        """Load all image paths and labels from JSON annotation files into buffer."""
+        items = []
+        labels = []
+
+        _freihand = self._lazy_load_ann_file()
+        classes = [c['name'] for c in _freihand.loadCats(_freihand.getCatIds())]
+        assert classes == self.CLASSES, "Incompatible category names with Freihand."
" + + self.json_id_to_contiguous = { + v: k for k, v in enumerate(_freihand.getCatIds())} + + # iterate through the annotations + image_ids = sorted(_freihand.getImgIds()) + for entry in _freihand.loadImgs(image_ids): + dirname, filename = entry['Freihand_url'].split('/')[-2:] + abs_path = os.path.join(self._root, dirname, filename) + if not os.path.exists(abs_path): + raise IOError('Image: {} not exists.'.format(abs_path)) + label = self._check_load_keypoints(_freihand, entry) + if not label: + continue + + # num of items are relative to person, not image + for obj in label: + items.append(abs_path) + labels.append(obj) + + return items, labels + + def _check_load_keypoints(self, coco, entry): + """Check and load ground-truth keypoints""" + ann_ids = coco.getAnnIds(imgIds=entry['id'], iscrowd=False) + objs = coco.loadAnns(ann_ids) + # check valid bboxes + valid_objs = [] + width = entry['width'] + height = entry['height'] + + for obj in objs: + contiguous_cid = self.json_id_to_contiguous[obj['category_id']] + if contiguous_cid >= self.num_class: + # not class of interest + continue + if max(obj['keypoints']) == 0: + continue + # convert from (x, y, w, h) to (xmin, ymin, xmax, ymax) and clip bound + xmin, ymin, xmax, ymax = bbox_clip_xyxy(bbox_xywh_to_xyxy(obj['bbox']), width, height) + # require non-zero box area + if obj['area'] <= 0 or xmax <= xmin or ymax <= ymin: + continue + if obj['num_keypoints'] == 0: + continue + # joints 3d: (num_joints, 3, 2); 3 is for x, y, z; 2 is for position, visibility + joints_3d = np.zeros((self.num_joints, 3, 2), dtype=np.float32) + for i in range(self.num_joints): + joints_3d[i, 0, 0] = obj['keypoints'][i * 3 + 0] + joints_3d[i, 1, 0] = obj['keypoints'][i * 3 + 1] + # joints_3d[i, 2, 0] = 0 + visible = min(1, obj['keypoints'][i * 3 + 2]) + joints_3d[i, :2, 1] = visible + # joints_3d[i, 2, 1] = 0 + + if np.sum(joints_3d[:, 0, 1]) < 1: + # no visible keypoint + continue + + if self._check_centers and self._train: + bbox_center, bbox_area = self._get_box_center_area((xmin, ymin, xmax, ymax)) + kp_center, num_vis = self._get_keypoints_center_count(joints_3d) + ks = np.exp(-2 * np.sum(np.square(bbox_center - kp_center)) / bbox_area) + if (num_vis / 80.0 + 47 / 80.0) > ks: + continue + + valid_objs.append({ + 'bbox': (xmin, ymin, xmax, ymax), + 'width': width, + 'height': height, + 'joints_3d': joints_3d + }) + + if not valid_objs: + if not self._skip_empty: + # dummy invalid labels if no valid objects are found + valid_objs.append({ + 'bbox': np.array([-1, -1, 0, 0]), + 'width': width, + 'height': height, + 'joints_3d': np.zeros((self.num_joints, 2, 2), dtype=np.float32) + }) + return valid_objs + + def _get_box_center_area(self, bbox): + """Get bbox center""" + c = np.array([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0]) + area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0]) + return c, area + + def _get_keypoints_center_count(self, keypoints): + """Get geometric center of all keypoints""" + keypoint_x = np.sum(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0)) + keypoint_y = np.sum(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0)) + num = float(np.sum(keypoints[:, 0, 1])) + return np.array([keypoint_x / num, keypoint_y / num]), num diff --git a/alphapose/datasets/Freihand_det.py b/alphapose/datasets/Freihand_det.py new file mode 100644 index 00000000..21550cb8 --- /dev/null +++ b/alphapose/datasets/Freihand_det.py @@ -0,0 +1,107 @@ +# ----------------------------------------------------- +# Copyright (c) Shanghai Jiao Tong University. All rights reserved. 
+# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
+# -----------------------------------------------------
+
+"""Freihand Hand Detection Box dataset."""
+import json
+import os
+
+import cv2
+import torch
+import torch.utils.data as data
+from tqdm import tqdm
+
+from alphapose.utils.presets import SimpleTransform
+from detector.apis import get_detector
+from alphapose.models.builder import DATASET
+
+
+@DATASET.register_module
+class Freihand_det(data.Dataset):
+    """ Freihand hand detection box dataset.
+
+    """
+    EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
+
+    def __init__(self,
+                 det_file=None,
+                 opt=None,
+                 **cfg):
+
+        self._cfg = cfg
+        self._opt = opt
+        self._preset_cfg = cfg['PRESET']
+        self._root = cfg['ROOT']
+        self._img_prefix = cfg['IMG_PREFIX']
+        if not det_file:
+            det_file = cfg['DET_FILE']
+        self._ann_file = os.path.join(self._root, cfg['ANN'])
+
+        if os.path.exists(det_file):
+            print("Detection results exist, will use them")
+        else:
+            print("Will write detection results to {}".format(det_file))
+            self.write_coco_json(det_file)
+
+        assert os.path.exists(det_file), "Error: no detection results found"
+        with open(det_file, 'r') as fid:
+            self._det_json = json.load(fid)
+
+        self._input_size = self._preset_cfg['IMAGE_SIZE']
+        self._output_size = self._preset_cfg['HEATMAP_SIZE']
+
+        self._sigma = self._preset_cfg['SIGMA']
+
+        if self._preset_cfg['TYPE'] == 'simple':
+            self.transformation = SimpleTransform(
+                self, scale_factor=0,
+                input_size=self._input_size,
+                output_size=self._output_size,
+                rot=0, sigma=self._sigma,
+                train=False, add_dpg=False)
+
+    def __getitem__(self, index):
+        det_res = self._det_json[index]
+        if not isinstance(det_res['image_id'], int):
+            img_id, _ = os.path.splitext(os.path.basename(det_res['image_id']))
+            img_id = int(img_id)
+        else:
+            img_id = det_res['image_id']
+        img_path = './data/Freihand/val/%08d.jpg' % img_id
+
+        # Load image
+        # scipy.misc.imread was removed in SciPy 1.2; read with OpenCV and convert BGR to RGB
+        image = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
+
+        imght, imgwidth = image.shape[0], image.shape[1]  # image is (height, width, channels)
+        x1, y1, w, h = det_res['bbox']
+        bbox = [x1, y1, x1 + w, y1 + h]
+        inp, bbox = self.transformation.test_transform(image, bbox)
+        return inp, torch.Tensor(bbox), torch.Tensor([det_res['bbox']]), torch.Tensor([img_id]), torch.Tensor([det_res['score']]), torch.Tensor([imght]), torch.Tensor([imgwidth])
+
+    def __len__(self):
+        return len(self._det_json)
+
+    def write_coco_json(self, det_file):
+        from pycocotools.coco import COCO
+        import pathlib
+
+        _coco = COCO(self._ann_file)
+        image_ids = sorted(_coco.getImgIds())
+        det_model = get_detector(self._opt)
+        dets = []
+        for entry in tqdm(_coco.loadImgs(image_ids)):
+            abs_path = os.path.join(
+                self._root, self._img_prefix, entry['file_name'])
+            det = det_model.detect_one_img(abs_path)
+            if det:
+                dets += det
+        pathlib.Path(os.path.split(det_file)[0]).mkdir(parents=True, exist_ok=True)
+        with open(det_file, 'w') as fid:
+            json.dump(dets, fid)
+
+    @property
+    def joint_pairs(self):
+        """Joint pairs that define which joints are swapped
+        when the image is flipped horizontally."""
+        # A single hand has no symmetric left/right keypoints to swap on flip.
+        return []
diff --git a/alphapose/datasets/__init__.py b/alphapose/datasets/__init__.py
index 333c5fbd..72eac370 100644
--- a/alphapose/datasets/__init__.py
+++ b/alphapose/datasets/__init__.py
@@ -4,4 +4,6 @@
 from .mscoco import Mscoco
 from .mpii import Mpii
+from .Freihand import Freihand
+from .Freihand_det import Freihand_det
 
-__all__ = ['CustomDataset', 'Mscoco', 'Mscoco_det', 'Mpii', 'ConcatDataset']
+__all__ = ['CustomDataset', 'Mscoco', 'Mscoco_det', 'Mpii', 'ConcatDataset', 'Freihand', 'Freihand_det']
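
[Usage note, not part of the patch series] With patch 1 applied, `Freihand` and `Freihand_det` are exposed through AlphaPose's DATASET registry. The snippet below is a minimal sketch of how the new dataset would typically be constructed from a config, mirroring how AlphaPose's training script builds its COCO datasets; `builder.build_dataset`, `update_config`, and the config path are assumed to match the upstream AlphaPose API of this era and are not part of this PR.

    # hedged sketch: constructing the registered Freihand dataset
    import torch.utils.data

    from alphapose.models import builder
    from alphapose.utils.config import update_config

    cfg = update_config('configs/freihand/resnet/224x224_res50.yaml')  # path added in patch 3

    # build_dataset looks up cfg.DATASET.TRAIN.TYPE ('Freihand') in the DATASET
    # registry and passes cfg.DATA_PRESET through as the dataset's PRESET key.
    train_dataset = builder.build_dataset(
        cfg.DATASET.TRAIN, preset_cfg=cfg.DATA_PRESET, train=True)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=32, shuffle=True, num_workers=4)

    # each item follows CustomDataset.__getitem__ above
    img, label, label_mask, img_id, bbox = train_dataset[0]
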
From 60bcf091d7486581d12caa7e855e06f86d250126 Mon Sep 17 00:00:00 2001
From: Dazz993
Date: Sun, 8 Mar 2020 11:13:37 +0800
Subject: [PATCH 2/3] Update visualization for Freihand dataset

---
 alphapose/utils/metrics.py |  4 ++--
 alphapose/utils/vis.py     | 37 +++++++++++++++++++++++++++++++++----
 alphapose/utils/writer.py  |  5 +----
 3 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/alphapose/utils/metrics.py b/alphapose/utils/metrics.py
index f7e17cb3..8d2e1460 100644
--- a/alphapose/utils/metrics.py
+++ b/alphapose/utils/metrics.py
@@ -66,7 +66,7 @@ def mask_cross_entropy(pred, target):
         pred, target, reduction='mean')[None]
 
 
-def evaluate_mAP(res_file, ann_type='bbox', ann_file='person_keypoints_val2017.json', silence=True):
+def evaluate_mAP(res_file, ann_type='bbox', ann_file='Freihand_keypoints_val.json', silence=True):
     """Evaluate mAP result for coco dataset.
 
     Parameters
@@ -85,7 +85,7 @@ class NullWriter(object):
         def write(self, arg):
             pass
 
-    ann_file = os.path.join('./data/coco/annotations/', ann_file)
+    ann_file = os.path.join('./data/Freihand/annotations/', ann_file)
 
     if silence:
         nullwrite = NullWriter()
diff --git a/alphapose/utils/vis.py b/alphapose/utils/vis.py
index 8eacbba5..265095c7 100644
--- a/alphapose/utils/vis.py
+++ b/alphapose/utils/vis.py
@@ -36,7 +36,7 @@ def vis_frame_dense(frame, im_res, add_bbox=False, format='coco'):
     '''
     frame: frame image
     im_res: im_res of predictions
-    format: coco or mpii
+    format: coco or mpii or Freihand
 
     return rendered image
     '''
@@ -117,10 +117,10 @@ def vis_frame_fast(frame, im_res, add_bbox=False, format='coco'):
     '''
    frame: frame image
     im_res: im_res of predictions
-    format: coco or mpii
+    format: coco or mpii or Freihand
 
     return rendered image
     '''
     if format == 'coco':
         l_pair = [
             (0, 1), (0, 2), (1, 3), (2, 4),  # Head
             (0, 5), (0, 6), (5, 7), (7, 9), (6, 8), (8, 10),
             (17, 11), (17, 12),  # Body
             (11, 13), (12, 14), (13, 15), (14, 16)
         ]
@@ -142,6 +142,21 @@
             (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
         ]
         p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
+    elif format == 'Freihand':
+        # hand skeleton: the wrist (0) chains out to each fingertip, four joints per finger
+        l_pair = [
+            (0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (6, 7), (7, 8), (0, 9), (9, 10),
+            (10, 11), (11, 12), (0, 13), (13, 14), (14, 15), (15, 16), (0, 17), (17, 18), (18, 19), (19, 20)]
+        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),
+                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),
+                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255),
+                   (0, 255, 255), (0, 191, 255), (0, 255, 102)]  # one color per keypoint (21)
+        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
+                      (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
+                      (77, 222, 255), (255, 156, 127),
+                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36),
+                      (0, 77, 255), (0, 77, 255), (0, 77, 255), (0, 77, 255),
+                      (255, 156, 127)]  # one color per limb (20)
     else:
         NotImplementedError
 
@@ -188,10 +203,9 @@ def vis_frame(frame, im_res, add_bbox=False, format='coco'):
     '''
     frame: frame image
     im_res: im_res of predictions
-    format: coco or mpii
+    format: coco or mpii or Freihand
 
     return rendered image
     '''
     if format == 'coco':
         l_pair = [
             (0, 1), (0, 2), (1, 3), (2, 4),  # Head
             (0, 5), (0, 6), (5, 7), (7, 9), (6, 8), (8, 10),
             (17, 11), (17, 12),  # Body
             (11, 13), (12, 14), (13, 15), (14, 16)
         ]
-
         p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),  # Nose, LEye, REye, LEar, REar
                    (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),  # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
                    (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255)]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
@@ -215,6 +229,21 @@ def vis_frame(frame, im_res, add_bbox=False, format='coco'):
         ]
         p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
         line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
+    elif format == 'Freihand':
+        # hand skeleton: the wrist (0) chains out to each fingertip, four joints per finger
+        l_pair = [
+            (0, 1), (1, 2), (2, 3), (3, 4), (0, 5), (5, 6), (6, 7), (7, 8), (0, 9), (9, 10),
+            (10, 11), (11, 12), (0, 13), (13, 14), (14, 15), (15, 16), (0, 17), (17, 18), (18, 19), (19, 20)]
+        p_color = [(0, 255, 255), (0, 191, 255), (0, 255, 102), (0, 77, 255), (0, 255, 0),
+                   (77, 255, 255), (77, 255, 204), (77, 204, 255), (191, 255, 77), (77, 191, 255), (191, 255, 77),
+                   (204, 77, 255), (77, 255, 204), (191, 77, 255), (77, 255, 191), (127, 77, 255), (77, 255, 127), (0, 255, 255),
+                   (0, 255, 255), (0, 191, 255), (0, 255, 102)]  # one color per keypoint (21)
+        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255), (0, 255, 50),
+                      (77, 255, 222), (77, 196, 255), (77, 135, 255), (191, 255, 77), (77, 255, 77),
+                      (77, 222, 255), (255, 156, 127),
+                      (0, 127, 255), (255, 127, 77), (0, 77, 255), (255, 77, 36),
+                      (0, 77, 255), (0, 77, 255), (0, 77, 255), (0, 77, 255),
+                      (255, 156, 127)]  # one color per limb (20)
     else:
         raise NotImplementedError
diff --git a/alphapose/utils/writer.py b/alphapose/utils/writer.py
index 97dcadba..088abf2b 100644
--- a/alphapose/utils/writer.py
+++ b/alphapose/utils/writer.py
@@ -18,9 +18,6 @@
     'frameSize': (640, 480)
 }
 
-EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
-
-
 class DataWriter():
     def __init__(self, cfg, opt, save_video=False,
                  video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
@@ -29,7 +26,7 @@ def __init__(self, cfg, opt, save_video=False,
         self.opt = opt
         self.video_save_opt = video_save_opt
 
-        self.eval_joints = EVAL_JOINTS
+        self.eval_joints = list(range(cfg.DATA_PRESET.NUM_JOINTS))
         self.save_video = save_video
         self.final_result = []
         self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)

From d4b9a3af5f590fa21bd033b4a19e98b5748ae683 Mon Sep 17 00:00:00 2001
From: Dazz993
Date: Sun, 8 Mar 2020 11:14:55 +0800
Subject: [PATCH 3/3] Add config for Freihand

---
 configs/freihand/resnet/224x224_res50.yaml | 66 ++++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100644 configs/freihand/resnet/224x224_res50.yaml
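
[Config note] The 224x224_res50.yaml hunk itself is cut off in this excerpt, so the file's actual contents are not shown. As a rough sketch only, the dataset-facing portion of such a config, assembled from the keys the new classes actually read (ROOT, IMG_PREFIX, ANN, AUG, and DET_FILE via `cfg[...]`, plus the preset consumed via `cfg['PRESET']` and `cfg.DATA_PRESET.NUM_JOINTS`), might look like the following; every concrete value here is an illustrative assumption, not the file from the PR, and the MODEL/TRAIN sections are omitted entirely.

    DATASET:
      TRAIN:
        TYPE: 'Freihand'                  # registered name from patch 1
        ROOT: './data/Freihand/'
        IMG_PREFIX: 'train'
        ANN: 'annotations/Freihand_keypoints_train.json'
        AUG:                              # read in CustomDataset.__init__
          SCALE_FACTOR: 0.3
          ROT_FACTOR: 40
          NUM_JOINTS_HALF_BODY: -1        # half-body aug disabled for hands
          PROB_HALF_BODY: -1
      VAL:
        TYPE: 'Freihand'
        ROOT: './data/Freihand/'
        IMG_PREFIX: 'val'
        ANN: 'annotations/Freihand_keypoints_val.json'
      TEST:
        TYPE: 'Freihand_det'
        ROOT: './data/Freihand/'
        IMG_PREFIX: 'val'
        ANN: 'annotations/Freihand_keypoints_val.json'
        DET_FILE: './exp/json/test_det_yolo.json'   # hypothetical path
    DATA_PRESET:
      TYPE: 'simple'                      # selects SimpleTransform
      SIGMA: 2
      NUM_JOINTS: 21
      IMAGE_SIZE:
        - 224
        - 224
      HEATMAP_SIZE:
        - 56
        - 56
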