
Commit

draft
itisacloud committed Nov 7, 2023
1 parent dffee85 commit d58485c
Showing 2 changed files with 102 additions and 40 deletions.
15 changes: 12 additions & 3 deletions sketch_map_tool/tasks.py
@@ -1,6 +1,7 @@
from io import BytesIO
from uuid import UUID
from zipfile import ZipFile
from PIL import Image

import geojson
from celery.result import AsyncResult
@@ -26,6 +27,8 @@
polygonize,
prepare_img_for_markings,
)
from sketch_map_tool.upload_processing.detect_markings import apply_sam

from sketch_map_tool.wms import client as wms_client


@@ -66,6 +69,7 @@ def generate_sketch_map(
format_,
scale,
)
map_img.write  # no-op: the write attribute is accessed but never called
db_client_celery.insert_map_frame(map_img, uuid)
return map_pdf

@@ -79,9 +83,10 @@ def generate_quality_report(bbox: Bbox) -> BytesIO | AsyncResult:
report = get_report(bbox)
return generate_report_pdf(report)


# 2. DIGITIZE RESULTS
#


@celery.task()
def georeference_sketch_maps(
file_ids: list[int],
@@ -136,13 +141,17 @@ def process(
) -> FeatureCollection:
"""Process a Sketch Map."""
# r = interim result

r = db_client_celery.select_file(sketch_map_id)
r = to_array(r)
r = clip(r, map_frames[uuid])
r = prepare_img_for_markings(map_frames[uuid], r)
r = prepare_img_for_markings(map_frames[uuid], r, sketch_map_id)
diffImage = Image.fromarray(r)
geojsons = []
masks = apply_sam(diffImage)

for color in COLORS:
r_ = detect_markings(r, color)
r_ = detect_markings(masks, diffImage, r, color)
r_ = georeference(r_, bbox)
r_ = polygonize(r_, color)
r_ = geojson.load(r_)
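Read together, the tasks.py changes make process() run SAM segmentation once per uploaded sketch map and then filter the resulting masks per colour. The following condensed sketch of that flow is illustrative only: the helper names come from the diff above, while the layout and intermediate variable names are not taken from the repository.

# Condensed sketch of the new digitization flow in process()
r = to_array(db_client_celery.select_file(sketch_map_id))
r = clip(r, map_frames[uuid])
r = prepare_img_for_markings(map_frames[uuid], r, sketch_map_id)
diff_image = Image.fromarray(r)
masks = apply_sam(diff_image)          # SAM runs once per sketch map
geojsons = []
for color in COLORS:
    marking = detect_markings(masks, diff_image, r, color)
    marking = georeference(marking, bbox)
    geojsons.append(geojson.load(polygonize(marking, color)))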
127 changes: 90 additions & 37 deletions sketch_map_tool/upload_processing/detect_markings.py
@@ -7,12 +7,18 @@
import numpy as np
from numpy.typing import NDArray
from PIL import Image, ImageEnhance
from transformers import pipeline

model = "facebook/sam-vit-base"

kwargs = {}


def detect_markings(
sketch_map_frame: NDArray,
color: str,
threshold_bgr: float = 0.5,
masks,
rawImage,
sketch_map_frame: NDArray,
color: str,
) -> NDArray:
"""
Detect markings in the colours blue, green, red, pink, turquoise, white, and yellow.
@@ -25,45 +31,19 @@ def detect_markings(
considered 255, all values below this threshold will be considered 0 for determining the
colour of the markings.
"""
threshold_bgr_abs = threshold_bgr * 255

colors = {
"white": (255, 255, 255),
"red": (0, 0, 255),
"blue": (255, 0, 0),
"green": (0, 255, 0),
"yellow": (0, 255, 255),
"turquoise": (255, 255, 0),
"pink": (255, 0, 255),
}
bgr = colors[color]

# for color, bgr in colors.items():
colors = [average_color_inside_mask(rawImage, mask) for mask in masks]
# classify each mask's average colour with closest_color before comparing to `color`
masks = [mask for mask, clr in zip(masks, colors) if clr is not None and closest_color(clr) == color]
single_color_marking = np.zeros_like(sketch_map_frame, np.uint8)
single_color_marking[
(
(sketch_map_frame[:, :, 0] < threshold_bgr_abs)
== (bgr[0] < threshold_bgr_abs)
)
& (
(sketch_map_frame[:, :, 1] < threshold_bgr_abs)
== (bgr[1] < threshold_bgr_abs)
)
& (
(sketch_map_frame[:, :, 2] < threshold_bgr_abs)
== (bgr[2] < threshold_bgr_abs)
)
] = 255
single_color_marking = _reduce_noise(single_color_marking)
single_color_marking = _reduce_holes(single_color_marking)
single_color_marking[single_color_marking > 0] = 255
for mask in masks:
single_color_marking[mask] = 1
return single_color_marking
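To make the new mask-filtering logic concrete, here is a small self-contained illustration of the idea using toy arrays (not repository fixtures) and the module's closest_color helper: a SAM mask is kept only if its average colour classifies as the requested colour, and the surviving masks are burned into a single-channel output.

import numpy as np

raw = np.array([[[255, 0, 0], [0, 0, 255]],
                [[255, 0, 0], [255, 255, 255]]], dtype=np.uint8)  # toy RGB diff image
masks = [np.array([[True, False], [True, False]]),   # covers the two red pixels
         np.array([[False, True], [False, False]])]  # covers the blue pixel

out = np.zeros(raw.shape[:2], np.uint8)
for mask in masks:
    mean_rgb = raw[mask].mean(axis=0)       # average colour under the mask
    if closest_color(mean_rgb) == "red":    # keep only masks classified as red
        out[mask] = 1                       # burn the surviving mask into the output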


def prepare_img_for_markings(
img_base: NDArray,
img_markings: NDArray,
threshold_img_diff: int = 100,
img_base: NDArray,
img_markings: NDArray,
id: int,
threshold_img_diff: int = 100,
) -> NDArray:
"""
TODO pydoc
@@ -85,6 +65,7 @@ def prepare_img_for_markings(
mask_markings = img_diff_gray > threshold_img_diff
markings_multicolor = np.zeros_like(img_markings, np.uint8)
markings_multicolor[mask_markings] = img_markings[mask_markings]

return markings_multicolor
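The visible tail of prepare_img_for_markings keeps only pixels whose greyscale difference from the base map frame exceeds threshold_img_diff; the earlier lines of the function are collapsed in this diff. A minimal sketch of that thresholding idea, assuming the difference image is built with cv2.absdiff (an assumption, since those lines are not shown here):

import cv2
import numpy as np

base = np.zeros((4, 4, 3), np.uint8)        # plain map frame (toy data)
marked = base.copy()
marked[1, 1] = (0, 255, 0)                  # one drawn pixel (green in BGR)
diff_gray = cv2.cvtColor(cv2.absdiff(base, marked), cv2.COLOR_BGR2GRAY)
mask_markings = diff_gray > 100             # threshold_img_diff
markings_multicolor = np.zeros_like(marked, np.uint8)
markings_multicolor[mask_markings] = marked[mask_markings]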


@@ -128,3 +109,75 @@ def _reduce_holes(img: NDArray, factor: int = 4) -> NDArray:
"""
# See https://docs.opencv.org/4.x/d9/d61/tutorial_py_morphological_ops.html
return cv2.morphologyEx(img, cv2.MORPH_CLOSE, np.ones((factor, factor), np.uint8))


def average_color_inside_mask(image, mask):
# Convert image to NumPy array
img_array = np.array(image)

# Extract RGB values of pixels inside the mask
masked_pixels = img_array[mask]

# Calculate the average color
if len(masked_pixels) > 0:
average_color = np.mean(masked_pixels, axis=0)
return average_color.astype(np.uint8) # Convert to integer values
else:
return None
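A quick usage note for average_color_inside_mask (toy values, chosen for illustration): the mask is a boolean array with the same height and width as the image, and the result is the per-channel mean of the selected pixels.

import numpy as np

img = np.array([[[10, 10, 10], [200, 200, 200]],
                [[30, 30, 30], [0, 0, 0]]], dtype=np.uint8)
mask = np.array([[True, False], [False, True]])
average_color_inside_mask(img, mask)   # -> array([5, 5, 5], dtype=uint8)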


def padding(img, padding=-30):
# A negative value shrinks the canvas and shifts the paste origin, effectively
# cropping abs(padding) pixels from every edge of the input image.
width, height = img.size

# Calculate the new image size
new_width = width + 2 * padding
new_height = height + 2 * padding

# Create a new blank image with the new size and a white background
new_img = Image.new("RGB", (new_width, new_height), (255, 255, 255))

# Calculate the position to paste the original image with negative padding
x_offset = padding
y_offset = padding

# Paste the original image onto the new image
new_img.paste(img, (x_offset, y_offset))
return new_img
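With the default of -30, padding therefore trims 30 pixels from every edge instead of adding a border, for example:

from PIL import Image

img = Image.new("RGB", (200, 100), (0, 0, 0))
padding(img).size   # -> (140, 40)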


def closest_color(rgb_value, threshold=0.5):
colors = {
"blue": [0, 0, 255],
"green": [0, 255, 0],
"red": [255, 0, 0],
"pink": [255, 0, 255],
"turquoise": [0, 255, 255],
"yellow": [255, 255, 0],
}
# Convert the input RGB value to a NumPy array
target = np.array(rgb_value)

# Initialize variables for the closest color
closest_color = None
closest_distance = float('inf')

# Iterate through the predefined colors
for color_name, color_rgb in colors.items():
color_rgb = np.array(color_rgb)

# Euclidean distance between the target colour and this reference colour
distance = np.linalg.norm(target - color_rgb)

# Check if this color is closer
if distance < closest_distance:
closest_color = color_name
closest_distance = distance

return closest_color
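closest_color returns the predefined colour with the smallest Euclidean distance to the input (the threshold argument is currently unused), for example:

closest_color((250, 10, 60))    # -> "red"
closest_color([10, 240, 230])   # -> "turquoise"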


def apply_sam(img):
# Run Hugging Face's automatic mask-generation pipeline (SAM) over the image
generator = pipeline("mask-generation", model=model)
outputs = generator(img, **kwargs)
masks = outputs["masks"]  # one mask per detected segment
return masks
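apply_sam wraps the Hugging Face mask-generation pipeline around the facebook/sam-vit-base checkpoint; its "masks" output holds one mask per detected segment, which detect_markings then uses as boolean index arrays. A hedged usage sketch (the file name is illustrative, and the first call downloads the model weights):

from PIL import Image

scan = Image.open("uploaded_sketch_map.png")   # illustrative path
masks = apply_sam(scan)                        # one HxW mask per segment

Because pipeline(...) is constructed inside apply_sam, the model is reloaded on every call; instantiating the generator once at module import would avoid that cost if it becomes a bottleneck.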
