From 76df689d48df0b5056769b9c8ca968ac4a0eb261 Mon Sep 17 00:00:00 2001 From: UnavailableDev <69792062+UnavailableDev@users.noreply.github.com> Date: Mon, 15 May 2023 13:56:58 +0200 Subject: optimization, reduced false positive blobs in final list --- openMV/POC_signs_red.py | 55 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/openMV/POC_signs_red.py b/openMV/POC_signs_red.py index cb521b9..3279467 100644 --- a/openMV/POC_signs_red.py +++ b/openMV/POC_signs_red.py @@ -22,27 +22,52 @@ while(True): clock.tick() # Update the FPS clock. img = sensor.snapshot() # Take a picture and return the image. - #lines = img.find_lines() - #for i in lines: - #img.draw_line(i.line(), 255, 8) + ThR = 0 + ThG = 255 + ThB = 128 + threshold_r = [(ThR,255,0,255,255,ThG)] - #gray = img - #gray.to_grayscale() - #img.find_edges(0) + #Red + #if(R >= ThR and G <= thG) + #Blue + #if(B >= thB) - blobs = img.find_blobs(threshold_rgb) + blobs_r = img.find_blobs([(0, 100, 25, 63, -128, 127)]) + blobs_b = img.find_blobs([(0, 29, 11, -128, -31, -5)]) #blobs.count() #print(blobs) ##kpts = img.find_keypoints() - for index, b in enumerate(blobs, 1): - convex = b.convexity() - if convex < 0.8: - img.draw_rectangle(b.rect(),int((512+256)*convex),2) - print(b.convexity()) - #img.draw_line(12,12,200,200,255,8) + print(f"old: { len(blobs_r) + len(blobs_b) }") - print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected - # to the IDE. The FPS should increase once disconnected. + blobs_r[:] = [b for b in blobs_r if (b.convexity() < 0.7 and b.area() > 64)] + blobs_b[:] = [b for b in blobs_b if (b.convexity() < 0.7 and b.area() > 64)] + + print(f"new: { len(blobs_r) + len(blobs_b) }") + + #for index, b in enumerate(blobs_r): + #convex = b.convexity() + #roundn = b.roundness() + #if convex < 0.8: + #img.draw_rectangle(b.rect(),[255,int((256)*roundn),0],2) + #print(index) + #else: + #del blobs_r[index] + #img.draw_rectangle(b.rect(),[128,128,128],4) + + for index, b in enumerate(blobs_r): + roundn = b.roundness() + img.draw_rectangle(b.rect(),[255,int((256)*roundn),0],2) + #print(index) + + for index, b in enumerate(blobs_b): + roundn = b.roundness() + img.draw_rectangle(b.rect(),[0,int((256)*roundn),255],2) + + + # Note: OpenMV Cam runs about half as fast when connected + # to the IDE. The FPS should increase once disconnected. + + print("EOC") -- cgit v1.2.3 From f2335307a26a8ab3e18d8990d2640a8de4cbd0e4 Mon Sep 17 00:00:00 2001 From: UnavailableDev <69792062+UnavailableDev@users.noreply.github.com> Date: Tue, 6 Jun 2023 11:57:48 +0200 Subject: keypoint sign recognition --- openMV/POC_signs_red.py | 108 +++++++++++++++++++++++++++++------------------- 1 file changed, 66 insertions(+), 42 deletions(-) diff --git a/openMV/POC_signs_red.py b/openMV/POC_signs_red.py index 3279467..c996a88 100644 --- a/openMV/POC_signs_red.py +++ b/openMV/POC_signs_red.py @@ -4,70 +4,94 @@ import sensor, image, time -# Color Tracking Thresholds (Grayscale Min, Grayscale Max) -min_rgb = 128 -max_rgb = 255 -threshold_list = [(min_rgb, max_rgb)]# only bright grey colours will get tracked. -threshold_rgb = [(0, 100, 75, 32, 2, 127)] #only find red -#threshold_rgb = [(18, 78, -8, 127, 24, 127)] - sensor.reset() # Reset and initialize the sensor. 
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) #sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240) +sensor.set_framesize(sensor.HVGA) # Set frame size to QVGA (320x240) sensor.skip_frames(time = 2000) # Wait for settings take effect. clock = time.clock() # Create a clock object to track the FPS. -while(True): - clock.tick() # Update the FPS clock. - img = sensor.snapshot() # Take a picture and return the image. - ThR = 0 - ThG = 255 - ThB = 128 - threshold_r = [(ThR,255,0,255,255,ThG)] +def init_kpts(str): + temp_img = image.Image(f"./{str}.jpg", copy_to_fb=True) + temp_img.to_grayscale() + + kpts = temp_img.find_keypoints(max_keypoints=128, threshold=kpts_threshold, corner_detector=kpts_corner, scale_factor=1.2) + return kpts + +def match_kpts(kpts0, kpts1): + if kpts0 is not None and kpts1 is not None: + + match = image.match_descriptor(kpts0, kpts1, threshold=70) + #print("matched:%d dt:%d"%(match.count(), match.theta())) + + if match.count() > 0: + print(match.count()) + + return match.count() > 1 + + else: + return 0 + +def read_red_sign(val, img, kpts): + + #img.draw_keypoints(kpts,255) + + if match_kpts(kpts, stop): + img.draw_rectangle(val.rect()) + #img.draw_cross(match.cx(), match.cy(), size=10) + print("stop") - #Red - #if(R >= ThR and G <= thG) + if match_kpts(kpts, speed): + img.draw_rectangle(val.rect()) + print("speed") - #Blue - #if(B >= thB) + if match_kpts(kpts, car): + img.draw_rectangle(val.rect()) + print("car") +#def read_red_sign(val, img, kpts): + + +kpts_threshold = 20 +kpts_corner = image.CORNER_FAST + +speed = init_kpts("speed") +stop = init_kpts("stop") +car = init_kpts("image") + + +while(True): + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. + + ######## Detect signs blobs_r = img.find_blobs([(0, 100, 25, 63, -128, 127)]) blobs_b = img.find_blobs([(0, 29, 11, -128, -31, -5)]) - #blobs.count() - #print(blobs) - ##kpts = img.find_keypoints() - - print(f"old: { len(blobs_r) + len(blobs_b) }") + #print(f"old: { len(blobs_r) + len(blobs_b) }") blobs_r[:] = [b for b in blobs_r if (b.convexity() < 0.7 and b.area() > 64)] blobs_b[:] = [b for b in blobs_b if (b.convexity() < 0.7 and b.area() > 64)] + #print(f"new: { len(blobs_r) + len(blobs_b) }") + + + ######## Read signs + img = img.to_grayscale() - print(f"new: { len(blobs_r) + len(blobs_b) }") + if(len(blobs_r) > 0 or len(blobs_b) > 0): - #for index, b in enumerate(blobs_r): - #convex = b.convexity() - #roundn = b.roundness() - #if convex < 0.8: - #img.draw_rectangle(b.rect(),[255,int((256)*roundn),0],2) - #print(index) - #else: - #del blobs_r[index] - #img.draw_rectangle(b.rect(),[128,128,128],4) + kpts_img = img.find_keypoints(max_keypoints=255, threshold=kpts_threshold, corner_detector=kpts_corner) - for index, b in enumerate(blobs_r): - roundn = b.roundness() - img.draw_rectangle(b.rect(),[255,int((256)*roundn),0],2) - #print(index) + for index, b in enumerate(blobs_r): + read_red_sign(b, img, kpts_img) - for index, b in enumerate(blobs_b): - roundn = b.roundness() - img.draw_rectangle(b.rect(),[0,int((256)*roundn),255],2) + for index, b in enumerate(blobs_b): + read_blu_sign(b, img, kpts_img) # Note: OpenMV Cam runs about half as fast when connected # to the IDE. The FPS should increase once disconnected. 
+ #img = gr - print("EOC") + #print("EOC") -- cgit v1.2.3 From 46acbbb6fd27cc00dadcc9118d774ae674f8eef5 Mon Sep 17 00:00:00 2001 From: UnavailableDev <69792062+UnavailableDev@users.noreply.github.com> Date: Tue, 6 Jun 2023 12:04:29 +0200 Subject: mv file --- nicla/signs_detect.py | 97 +++++++++++++++++++++++++++++++++++++++++++++++++ openMV/POC_signs_red.py | 97 ------------------------------------------------- 2 files changed, 97 insertions(+), 97 deletions(-) create mode 100644 nicla/signs_detect.py delete mode 100644 openMV/POC_signs_red.py diff --git a/nicla/signs_detect.py b/nicla/signs_detect.py new file mode 100644 index 0000000..c996a88 --- /dev/null +++ b/nicla/signs_detect.py @@ -0,0 +1,97 @@ +# Hello World Example +# +# Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script! + +import sensor, image, time + +sensor.reset() # Reset and initialize the sensor. +sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) +#sensor.set_pixformat(sensor.GRAYSCALE) +sensor.set_framesize(sensor.HVGA) # Set frame size to QVGA (320x240) +sensor.skip_frames(time = 2000) # Wait for settings take effect. +clock = time.clock() # Create a clock object to track the FPS. + + +def init_kpts(str): + temp_img = image.Image(f"./{str}.jpg", copy_to_fb=True) + temp_img.to_grayscale() + + kpts = temp_img.find_keypoints(max_keypoints=128, threshold=kpts_threshold, corner_detector=kpts_corner, scale_factor=1.2) + return kpts + +def match_kpts(kpts0, kpts1): + if kpts0 is not None and kpts1 is not None: + + match = image.match_descriptor(kpts0, kpts1, threshold=70) + #print("matched:%d dt:%d"%(match.count(), match.theta())) + + if match.count() > 0: + print(match.count()) + + return match.count() > 1 + + else: + return 0 + +def read_red_sign(val, img, kpts): + + #img.draw_keypoints(kpts,255) + + if match_kpts(kpts, stop): + img.draw_rectangle(val.rect()) + #img.draw_cross(match.cx(), match.cy(), size=10) + print("stop") + + if match_kpts(kpts, speed): + img.draw_rectangle(val.rect()) + print("speed") + + if match_kpts(kpts, car): + img.draw_rectangle(val.rect()) + print("car") + +#def read_red_sign(val, img, kpts): + + +kpts_threshold = 20 +kpts_corner = image.CORNER_FAST + +speed = init_kpts("speed") +stop = init_kpts("stop") +car = init_kpts("image") + + +while(True): + clock.tick() # Update the FPS clock. + img = sensor.snapshot() # Take a picture and return the image. + + ######## Detect signs + + blobs_r = img.find_blobs([(0, 100, 25, 63, -128, 127)]) + blobs_b = img.find_blobs([(0, 29, 11, -128, -31, -5)]) + #print(f"old: { len(blobs_r) + len(blobs_b) }") + + blobs_r[:] = [b for b in blobs_r if (b.convexity() < 0.7 and b.area() > 64)] + blobs_b[:] = [b for b in blobs_b if (b.convexity() < 0.7 and b.area() > 64)] + #print(f"new: { len(blobs_r) + len(blobs_b) }") + + + ######## Read signs + img = img.to_grayscale() + + if(len(blobs_r) > 0 or len(blobs_b) > 0): + + kpts_img = img.find_keypoints(max_keypoints=255, threshold=kpts_threshold, corner_detector=kpts_corner) + + for index, b in enumerate(blobs_r): + read_red_sign(b, img, kpts_img) + + for index, b in enumerate(blobs_b): + read_blu_sign(b, img, kpts_img) + + + # Note: OpenMV Cam runs about half as fast when connected + # to the IDE. The FPS should increase once disconnected. 
+ #img = gr + + #print("EOC") diff --git a/openMV/POC_signs_red.py b/openMV/POC_signs_red.py deleted file mode 100644 index c996a88..0000000 --- a/openMV/POC_signs_red.py +++ /dev/null @@ -1,97 +0,0 @@ -# Hello World Example -# -# Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script! - -import sensor, image, time - -sensor.reset() # Reset and initialize the sensor. -sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -#sensor.set_pixformat(sensor.GRAYSCALE) -sensor.set_framesize(sensor.HVGA) # Set frame size to QVGA (320x240) -sensor.skip_frames(time = 2000) # Wait for settings take effect. -clock = time.clock() # Create a clock object to track the FPS. - - -def init_kpts(str): - temp_img = image.Image(f"./{str}.jpg", copy_to_fb=True) - temp_img.to_grayscale() - - kpts = temp_img.find_keypoints(max_keypoints=128, threshold=kpts_threshold, corner_detector=kpts_corner, scale_factor=1.2) - return kpts - -def match_kpts(kpts0, kpts1): - if kpts0 is not None and kpts1 is not None: - - match = image.match_descriptor(kpts0, kpts1, threshold=70) - #print("matched:%d dt:%d"%(match.count(), match.theta())) - - if match.count() > 0: - print(match.count()) - - return match.count() > 1 - - else: - return 0 - -def read_red_sign(val, img, kpts): - - #img.draw_keypoints(kpts,255) - - if match_kpts(kpts, stop): - img.draw_rectangle(val.rect()) - #img.draw_cross(match.cx(), match.cy(), size=10) - print("stop") - - if match_kpts(kpts, speed): - img.draw_rectangle(val.rect()) - print("speed") - - if match_kpts(kpts, car): - img.draw_rectangle(val.rect()) - print("car") - -#def read_red_sign(val, img, kpts): - - -kpts_threshold = 20 -kpts_corner = image.CORNER_FAST - -speed = init_kpts("speed") -stop = init_kpts("stop") -car = init_kpts("image") - - -while(True): - clock.tick() # Update the FPS clock. - img = sensor.snapshot() # Take a picture and return the image. - - ######## Detect signs - - blobs_r = img.find_blobs([(0, 100, 25, 63, -128, 127)]) - blobs_b = img.find_blobs([(0, 29, 11, -128, -31, -5)]) - #print(f"old: { len(blobs_r) + len(blobs_b) }") - - blobs_r[:] = [b for b in blobs_r if (b.convexity() < 0.7 and b.area() > 64)] - blobs_b[:] = [b for b in blobs_b if (b.convexity() < 0.7 and b.area() > 64)] - #print(f"new: { len(blobs_r) + len(blobs_b) }") - - - ######## Read signs - img = img.to_grayscale() - - if(len(blobs_r) > 0 or len(blobs_b) > 0): - - kpts_img = img.find_keypoints(max_keypoints=255, threshold=kpts_threshold, corner_detector=kpts_corner) - - for index, b in enumerate(blobs_r): - read_red_sign(b, img, kpts_img) - - for index, b in enumerate(blobs_b): - read_blu_sign(b, img, kpts_img) - - - # Note: OpenMV Cam runs about half as fast when connected - # to the IDE. The FPS should increase once disconnected. - #img = gr - - #print("EOC") -- cgit v1.2.3 From e239429c8885a7c152194a9f25401204258a0fbd Mon Sep 17 00:00:00 2001 From: UnavailableDev <69792062+UnavailableDev@users.noreply.github.com> Date: Tue, 6 Jun 2023 12:08:31 +0200 Subject: cleanup --- nicla/signs_detect.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/nicla/signs_detect.py b/nicla/signs_detect.py index c996a88..baf62e8 100644 --- a/nicla/signs_detect.py +++ b/nicla/signs_detect.py @@ -1,12 +1,7 @@ -# Hello World Example -# -# Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script! - import sensor, image, time sensor.reset() # Reset and initialize the sensor. 
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE) -#sensor.set_pixformat(sensor.GRAYSCALE) sensor.set_framesize(sensor.HVGA) # Set frame size to QVGA (320x240) sensor.skip_frames(time = 2000) # Wait for settings take effect. clock = time.clock() # Create a clock object to track the FPS. @@ -15,28 +10,20 @@ clock = time.clock() # Create a clock object to track the FPS. def init_kpts(str): temp_img = image.Image(f"./{str}.jpg", copy_to_fb=True) temp_img.to_grayscale() - kpts = temp_img.find_keypoints(max_keypoints=128, threshold=kpts_threshold, corner_detector=kpts_corner, scale_factor=1.2) return kpts def match_kpts(kpts0, kpts1): if kpts0 is not None and kpts1 is not None: - match = image.match_descriptor(kpts0, kpts1, threshold=70) #print("matched:%d dt:%d"%(match.count(), match.theta())) - if match.count() > 0: print(match.count()) - return match.count() > 1 - else: return 0 def read_red_sign(val, img, kpts): - - #img.draw_keypoints(kpts,255) - if match_kpts(kpts, stop): img.draw_rectangle(val.rect()) #img.draw_cross(match.cx(), match.cy(), size=10) @@ -80,7 +67,6 @@ while(True): img = img.to_grayscale() if(len(blobs_r) > 0 or len(blobs_b) > 0): - kpts_img = img.find_keypoints(max_keypoints=255, threshold=kpts_threshold, corner_detector=kpts_corner) for index, b in enumerate(blobs_r): @@ -88,10 +74,3 @@ while(True): for index, b in enumerate(blobs_b): read_blu_sign(b, img, kpts_img) - - - # Note: OpenMV Cam runs about half as fast when connected - # to the IDE. The FPS should increase once disconnected. - #img = gr - - #print("EOC") -- cgit v1.2.3 From bb19171eaa717acec95a7473267db8ce38f208b9 Mon Sep 17 00:00:00 2001 From: lonkaars Date: Thu, 8 Jun 2023 11:57:16 +0200 Subject: add research --- assets/blob_invpers.pdf | Bin 0 -> 344612 bytes assets/blob_traffic_lights.pdf | Bin 0 -> 474299 bytes doc/dui.md | 67 ++++++++++++++++++++++++++++++++++++-- matlab/invpers.m | 72 +++++++++++++++++++++++++++++++++++++++++ matlab/traffic_lights.m | 69 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 206 insertions(+), 2 deletions(-) create mode 100644 assets/blob_invpers.pdf create mode 100644 assets/blob_traffic_lights.pdf create mode 100644 matlab/invpers.m create mode 100644 matlab/traffic_lights.m diff --git a/assets/blob_invpers.pdf b/assets/blob_invpers.pdf new file mode 100644 index 0000000..0c8b071 Binary files /dev/null and b/assets/blob_invpers.pdf differ diff --git a/assets/blob_traffic_lights.pdf b/assets/blob_traffic_lights.pdf new file mode 100644 index 0000000..44491f2 Binary files /dev/null and b/assets/blob_traffic_lights.pdf differ diff --git a/doc/dui.md b/doc/dui.md index 6260fb5..e5a425f 100644 --- a/doc/dui.md +++ b/doc/dui.md @@ -34,8 +34,8 @@ description in section \ref{problem-statement}. 
\begin{enumerate} \item Driving when not on a road is not allowed \item The vehicle can follow a road by steering itself accordingly - \item Driving off the road is only allowed when necessary for the camera to - keep seeing the road + \item \label{req:offroad} Driving off the road is only allowed when + necessary for the camera to keep seeing the road \end{enumerate} \item \label{req:traffic-lights} The vehicle handles traffic lights in the following way @@ -237,6 +237,38 @@ For more information about Radon transform algorithms check the below links: - \citetitle{matlab:radon} - \citetitle{opencv:radon} +#### Blob detection for roads + +As mentioned in section \ref{specifications} requirement \ref{req:offroad}, the +car is allowed to drive off-road to keep the road visible. This means that a +naive approach where the car drives towards where 'the most road' is could +suffice for our road detection needs. + +A simple prototype for this approach was made using Matlab, shown in figure +\ref{fig:matlab-roaddetect}. The top part of the figure shows the raw camera +image (flipped), with a gray line down the middle, and a red arrow showing the +steering value. The red arrow is the only 'output' of this algorithm. + +The bottom part of the figure shows the detected blobs (green bounding boxes) +on a copy of the original top image with the following transforms: + +1. Reverse perspective-transform +2. Gaussian blur (3x3 kernel) to smooth out any noise caused by the floor + texture +3. Threshold to match most of the light image parts (road) + +The steering value (red arrow) is calculated by averaging the horizontal screen +position (normalized to between -1 and 1) using a weight factor calculated by +using each blobs bounding box area. The weight factor has a minimum 'base' +value that is added, and has a maximum value so large blobs don't 'overpower' +smaller blobs. This is so the inside road edge of a turn doesn't get lost +because the outer edge has a larger bounding box. + +![Road detection prototype in Matlab](../assets/blob_invpers.pdf){#fig:matlab-roaddetect} + +The implementation of this road detection algorithm is provided in the source +code tree. + ### Which algorithm is suitable for our project? We have identified four different types of line detection algorithms that could potentially be used for our project. To decide on the best algorithm, we need to consider various factors such as accuracy, efficiency, and ease of use. While processing time is not a critical factor in our case, we need to ensure that the algorithm we choose meets our requirements and is accessible through the platform we are using, which is currently openMV but may change to openCV in the future. Therefore, our priority is to select an algorithm that is easy to implement, provides accurate results, and is compatible with our platform. @@ -516,6 +548,37 @@ solution (this requires testing). } \signRecognitionConclusion +## Traffic light detection using blobs + +A simple traffic light detection algorithm using blob detection can take +advantage of the following properties of traffic lights (as seen by the Nicla +module): + +- Traffic lights are mostly dark +- Traffic lights are generally rectangular +- Traffic lights have fixed spots where the saturation and hue value ranges are + known if the light is on + +The algorithm has the following steps: + +1. Apply a threshold to keep only dark areas +2. Prune any blobs with too little surface area +3. 
Prune any blobs that aren't a vertical rectangle with approximately the same + aspect ratio as a traffic light +4. Poke three points down the center of each blob, at 20%, 50%, and 80% of the + blob's height +5. Check if any of the three points has a matching hue and saturation range of + a lit up light +6. The first point that matches is deemed to be the traffic light's color, if + no points match, it's probably not a traffic light + +Figure \ref{fig:matlab-trafficlight} shows this algorithm on an example image: + +![Traffic light detection prototype in Matlab](../assets/blob_traffic_lights.pdf){#fig:matlab-trafficlight} + +The implementation of this road detection algorithm is provided in the source +code tree. + # Conclusion \communicationConclusion diff --git a/matlab/invpers.m b/matlab/invpers.m new file mode 100644 index 0000000..e881ed8 --- /dev/null +++ b/matlab/invpers.m @@ -0,0 +1,72 @@ +clf +clc + +WIDTH = 480; +HEIGHT = 320; +MAX_AREA = WIDTH * HEIGHT / 10; + +HORIZON = 140; +STRETCH = 105; +SQUEEZE = 145; + +movingPoints = [STRETCH HORIZON; (WIDTH-STRETCH) HORIZON; WIDTH HEIGHT; 0 HEIGHT]; +fixedPoints = [0 0;WIDTH 0; (WIDTH-SQUEEZE) HEIGHT; SQUEEZE HEIGHT]; +t = fitgeotrans(movingPoints,fixedPoints,'projective'); + +x = imread('00021.jpg'); +x = imrotate(x, 180); +o = x; + +r = imref2d(size(x),[1 size(x,2)],[1 size(x,1)]); +x = imwarp(x,r,t,'OutputView',r); + +x = imgaussfilt(x, 3); +x = rgb2hsv(x); + +x = x(:,:,3); +x = imadjust(x); +x = x > 0.8; + +[lmap, lcount] = bwlabel(x); + +subplot(2,1,1); +hold on; +imshow(o); +plot([(WIDTH/2) (WIDTH/2)], [0 HEIGHT], 'Color', '#888', 'LineWidth', 2); + +subplot(2,1,2); +hold on; +imshow(label2rgb(lmap, 'jet', 'black')); + +sum = 0; +count = 0; +for i = 1:lcount + props = regionprops(lmap == i, 'BoundingBox', 'Area'); + s = props.BoundingBox; + sx = s(1); + sy = s(2); + lx = s(1) + s(3); + ly = s(2) + s(4); + width = lx - sx; + height = ly - sy; + area_weight = 40 + min(MAX_AREA, width * height); + horizontal_pos = (sx + width/2) / WIDTH; + sum = sum + horizontal_pos * area_weight; + count = count + area_weight; + rectangle('Position', [s(1) s(2) s(3) s(4)], 'EdgeColor', 'green'); +end + +avg = sum / count; +avg = avg * 2 - 1; +avg = max(-1, min(1, avg)); +avg + +subplot(2,1,1); +quiver(WIDTH/2,HEIGHT/2,avg*WIDTH/2,0,0,'linewidth',3,'color','r') + +if abs(avg) < 0.1 + "straight ahead" +else + temp = ["left", "right"]; + temp((sign(avg) + 1) / 2 + 1) +end \ No newline at end of file diff --git a/matlab/traffic_lights.m b/matlab/traffic_lights.m new file mode 100644 index 0000000..8804778 --- /dev/null +++ b/matlab/traffic_lights.m @@ -0,0 +1,69 @@ +clf +clc +hold on; + +x = imread('010.jpg'); +x = imrotate(x, 180); +o = x; +x = rgb2hsv(x); + +t = x(:,:,3); +t = imadjust(t); +t = t < 0.15; + +[lmap, lcount] = bwlabel(t); + +imshow(label2rgb(lmap, 'jet', 'black')); +imshow(o); +for i = 1:lcount + s = regionprops(lmap == i, 'BoundingBox').BoundingBox; + sx = s(1); + sy = s(2); + lx = s(1) + s(3); + ly = s(2) + s(4); + width = lx - sx; + height = ly - sy; + area = width * height; + if area < 450 + continue + end + aspect = height / width; + % stoplichten zullen wel een verhouding van ongeveer 2.2 hebben + if abs(aspect - 2.2) > 0.5 + continue + end + + red_light = [round(sx + width / 2), round(sy + 0.2 * height)]; + yellow_light = [round(sx + width / 2), round(sy + 0.5 * height)]; + green_light = [round(sx + width / 2), round(sy + 0.8 * height)]; + + light_status = 0; % 0 = geen lamp, 1 = rood, 2 = geel, 3 = groen + + % x(red_light(2), 
red_light(1), :) + if (light_status == 0) && ... + (abs(x(red_light(2), red_light(1), 1) - 0.5) > 0.4) && ... + (x(red_light(2), red_light(1), 2) > 0.4) + light_status = 1; + plot(red_light(1), red_light(2), '.'); + end + if (light_status == 0) && ... + (abs(x(yellow_light(2), yellow_light(1), 1) - 0.1) < 0.1) && ... + (x(yellow_light(2), yellow_light(1), 2) > 0.4) + light_status = 2; + plot(yellow_light(1), yellow_light(2), '.'); + end + if (light_status == 0) && ... + (abs(x(green_light(2), green_light(1), 1) - 0.4) < 0.1) && ... + (x(green_light(2), green_light(1), 2) > 0.4) + light_status = 3; + plot(green_light(1), green_light(2), '.'); + end + + if light_status == 0 + continue + end + + rectangle('Position', [s(1) s(2) s(3) s(4)], 'EdgeColor', 'green'); + status = ["rood" "geel" "groen"]; status(light_status) +end + -- cgit v1.2.3 From a6ea4db7c5b4fe942eb7bed4deee19efa6789b44 Mon Sep 17 00:00:00 2001 From: lonkaars Date: Thu, 8 Jun 2023 12:31:45 +0200 Subject: implement garbage filter --- nicla/garbage_filter.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 nicla/garbage_filter.py diff --git a/nicla/garbage_filter.py b/nicla/garbage_filter.py new file mode 100644 index 0000000..3afd2c3 --- /dev/null +++ b/nicla/garbage_filter.py @@ -0,0 +1,40 @@ +traffic_light_garbage = list() + +""" +filter garbage + +arguments: +arr -- garbage filter memory list +val -- input value +sensitivity -- minimum amount of `val` in `arr` to return `val` +limit -- max length of `arr` + +return value: +if `arr` contains `sensitivity` or more of any item, that item will be +returned, else None is returned +""" +def garbage_filter(arr, val, sensitivity, limit): + if val == None: return None + arr[:] = [None]*(limit - len(arr)) + arr + arr.pop(0) + arr.append(val) + if len([x for x in arr if x == val]) >= sensitivity: + return val + return None + +if __name__ == "__main__": + inputs = [ + "red", + None, + "green", + "green", + None, + "red", + "green", + "red", + "red", + None, + None + ] + for x in inputs: + print(garbage_filter(traffic_light_garbage, x, 3, 4)) -- cgit v1.2.3
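
The final patch adds nicla/garbage_filter.py, which is exercised only by its standalone __main__ demo. Below is a minimal sketch of how the filter could be hooked into the Nicla detection loop from nicla/signs_detect.py to debounce noisy per-frame traffic-light colours; this wiring is an assumption and not part of the patch series. classify_traffic_light() is a hypothetical placeholder for a MicroPython port of the blob and hue/saturation checks prototyped in matlab/traffic_lights.m, and the 3-of-4 window simply mirrors the values used in the __main__ demo. Only garbage_filter() and traffic_light_garbage are taken from the repository.

# Sketch only: debouncing per-frame traffic-light colours with the garbage
# filter added above. Runs on the OpenMV/Nicla firmware; the classifier is a
# stub and always returns None here.
import sensor
from garbage_filter import garbage_filter, traffic_light_garbage

sensor.reset()
sensor.set_pixformat(sensor.RGB565)   # colour is needed for the hue checks
sensor.set_framesize(sensor.HVGA)
sensor.skip_frames(time=2000)

def classify_traffic_light(img):
    # Hypothetical per-frame classifier: would return "red", "yellow",
    # "green" or None, e.g. by porting matlab/traffic_lights.m to
    # find_blobs() plus hue/saturation checks at 20%/50%/80% of blob height.
    return None

while True:
    img = sensor.snapshot()
    raw = classify_traffic_light(img)   # noisy single-frame result
    # Report a colour only once it appears in at least 3 of the last 4 frames.
    stable = garbage_filter(traffic_light_garbage, raw, 3, 4)
    if stable is not None:
        print("traffic light:", stable)

Because the rolling window is kept in a plain list, a second memory list could debounce the keypoint-based sign matches from signs_detect.py in exactly the same way.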
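
For the road-following side, the "Blob detection for roads" section added to doc/dui.md ships only a Matlab prototype (matlab/invpers.m). The sketch below is a rough MicroPython/OpenMV translation of just the blob-weighting step, under the assumption that bright pixels are road; the inverse-perspective warp and Gaussian blur from the prototype are omitted, and the grayscale threshold is a placeholder that would need tuning on the Nicla.

# Sketch only: blob-area-weighted steering value in the range -1 (left) to
# 1 (right), following the weighting scheme from matlab/invpers.m.
import sensor

WIDTH, HEIGHT = 480, 320           # HVGA, as used in signs_detect.py
MAX_AREA = WIDTH * HEIGHT // 10    # cap so one huge blob cannot dominate
BASE_WEIGHT = 40                   # floor so small blobs still count

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.HVGA)
sensor.skip_frames(time=2000)

def steering_value(img):
    # (200, 255) is a placeholder "road is bright" threshold.
    blobs = img.find_blobs([(200, 255)], merge=True)
    weighted_sum = 0.0
    total_weight = 0.0
    for b in blobs:
        weight = BASE_WEIGHT + min(MAX_AREA, b.w() * b.h())
        pos = (b.x() + b.w() / 2) / WIDTH      # 0..1 across the frame
        weighted_sum += pos * weight
        total_weight += weight
    if total_weight == 0:
        return 0.0                             # no road visible
    avg = weighted_sum / total_weight * 2 - 1  # rescale to -1..1
    return max(-1.0, min(1.0, avg))

while True:
    print(steering_value(sensor.snapshot()))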