-rw-r--r--  doc/dui.md             | 12
-rw-r--r--  nicla/road.py          | 39
-rw-r--r--  nicla/signs_detect.py  | 41
-rw-r--r--  nicla/traffic_light.py |  7
4 files changed, 64 insertions(+), 35 deletions(-)
@@ -313,6 +313,16 @@ As you can see there is quite a lot of difference between them. This function ne
 All the above algorithms could be used with OpenCV, but the Radon transform
 needs more work than the others with the amount of information in the doc.
+
+\def\roadConclusion{
+In conclusion, line detection offers various possibilities; through testing and experimentation we settled on the blobbing method on the OpenMV platform, owing to the limitations of WiFi communication and because blobbing is a robust and straightforward algorithm. While we still believe the Hough transform is generally superior in the diversity and consistency of its line detection, it presented challenges in this project: the varying floors and lighting conditions required additional image processing.
+
+Later in the project we discovered another image processing technique, perspective correction, also known as the bird's eye view. This transform made more lines visible and offered greater opportunities when combined with the Hough transform. Although blobbing also used the bird's eye view, the Hough transform offered more precision on the corrected image.
+
+In summary, we chose blobbing for its robust and simple algorithm, which yields a higher frame rate (fps), whereas the Hough transform demonstrated greater precision when combined with the bird's eye view. Given the time constraints, integrating the two techniques well proved to be challenging.
+}
+\roadConclusion
+
 ## Communication between the Nicla and Zumo
 
 In order to make the Zumo robot both detect where it is on a road, and steer to
@@ -586,4 +596,4 @@ code tree.
 \signDetectionColorConclusion
 \signDetectionShapeConclusion
 \signRecognitionConclusion
-
+\roadConclusion
diff --git a/nicla/road.py b/nicla/road.py
index 143a9c5..2c4e84a 100644
--- a/nicla/road.py
+++ b/nicla/road.py
@@ -27,6 +27,28 @@
 points = [(STRETCH, HORIZON),
           (WIDTH-1+SQUEEZE, HEIGHT-1),
           (-SQUEEZE, HEIGHT-1)]
+class CircularBuffer:
+    def __init__(self, size):
+        self.buffer = [None] * size
+        self.size = size
+        self.index = 0
+        self.counter = 0
+        self.output_value = None
+
+    def add(self, value):
+        self.buffer[self.index] = value
+        self.index = (self.index + 1) % self.size
+
+        if self.counter > 0 and self.buffer[self.index] == self.output_value:
+            self.counter += 1
+        else:
+            self.output_value = value
+            self.counter = 1
+
+        if self.counter == self.size * 2 // 3:
+            return self.output_value
+
+        return None
 
 def drive(img):
     img.to_grayscale()
@@ -54,19 +76,20 @@ def drive(img):
     uart.uart_buffer(steerByte)
 
+traffic_buffer = CircularBuffer(2)
+sign_buffer = CircularBuffer(3)
 
 while(True):
-    #img = sensor.snapshot()
-    #data = traffic_light.traf_lights(img)
-    #if data is not None:
-        #uart.uart_buffer(data)
-
+    img = sensor.snapshot()
+    data = traffic_buffer.add(traffic_light.traf_lights(img))
+    if data is not None:
+        uart.uart_buffer(data)
 
     sign_img = sensor.snapshot()
-    data_sign = signs_detect.sign_detection(sign_img)
-    if data_sign is not None:
-        uart.uart_buffer(data_sign)
+    data = sign_buffer.add(signs_detect.sign_detection(sign_img))
+    if data is not None:
+        uart.uart_buffer(data)
 
     drive_img = sensor.snapshot()
     drive(drive_img)
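The CircularBuffer added to road.py debounces the per-frame detections: a byte is
only forwarded over UART once the same value has been seen over enough consecutive
frames, so a single-frame misdetection never reaches the Zumo. Below is a minimal
standalone sketch of that idea, runnable off-device in plain Python. It is a
simplified variant (no ring buffer, fires after "size" consecutive matches), not
the committed class, which fires once roughly two thirds of its buffer agrees.

class DebounceBuffer:
    """Return a value only after "size" consecutive identical additions."""
    def __init__(self, size):
        self.size = size
        self.candidate = None
        self.count = 0

    def add(self, value):
        if value == self.candidate:
            self.count += 1
        else:
            # A new value resets the streak.
            self.candidate = value
            self.count = 1
        # Fire exactly once, on the frame that completes the streak.
        return self.candidate if self.count == self.size else None

# Simulated per-frame sign detections: one spurious 0x02 amid 0x01 readings.
frames = [0x01, 0x01, 0x02, 0x01, 0x01, 0x01]
buf = DebounceBuffer(3)
for f in frames:
    out = buf.add(f)
    if out is not None:
        print("forward over UART:", hex(out))  # fires once, for 0x01

Firing exactly once per streak keeps UART traffic to a single byte per event,
which matches how the main loop above only calls uart.uart_buffer(data) when
add() returns a value; the buffer sizes (2 for traffic lights, 3 for signs)
mirror the ones instantiated in road.py.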
diff --git a/nicla/signs_detect.py b/nicla/signs_detect.py
index baf62e8..70bc633 100644
--- a/nicla/signs_detect.py
+++ b/nicla/signs_detect.py
@@ -13,45 +13,39 @@ def init_kpts(str):
     kpts = temp_img.find_keypoints(max_keypoints=128, threshold=kpts_threshold, corner_detector=kpts_corner, scale_factor=1.2)
     return kpts
 
+speed = init_kpts("speed")
+stop = init_kpts("stop")
+car = init_kpts("image")
+
 def match_kpts(kpts0, kpts1):
     if kpts0 is not None and kpts1 is not None:
         match = image.match_descriptor(kpts0, kpts1, threshold=70)
         #print("matched:%d dt:%d"%(match.count(), match.theta()))
         if match.count() > 0:
             print(match.count())
-        return match.count() > 1
+        return match.count() > 0
     else:
         return 0
 
 def read_red_sign(val, img, kpts):
+    data = 0x00
     if match_kpts(kpts, stop):
-        img.draw_rectangle(val.rect())
+        #img.draw_rectangle(val.rect())
         #img.draw_cross(match.cx(), match.cy(), size=10)
-        print("stop")
-
+        #print("stop")
+        return 0x01
     if match_kpts(kpts, speed):
         img.draw_rectangle(val.rect())
-        print("speed")
-
+        #print("speed")
+        return 0x02
     if match_kpts(kpts, car):
         img.draw_rectangle(val.rect())
-        print("car")
+        #print("car")
+        return 0x03
 
 #def read_red_sign(val, img, kpts):
-
-kpts_threshold = 20
-kpts_corner = image.CORNER_FAST
-
-speed = init_kpts("speed")
-stop = init_kpts("stop")
-car = init_kpts("image")
-
-
-while(True):
-    clock.tick()             # Update the FPS clock.
-    img = sensor.snapshot()  # Take a picture and return the image.
-
+def sign_detection(img):
     ######## Detect signs
 
     blobs_r = img.find_blobs([(0, 100, 25, 63, -128, 127)])
@@ -70,7 +64,8 @@ while(True):
     kpts_img = img.find_keypoints(max_keypoints=255, threshold=kpts_threshold, corner_detector=kpts_corner)
 
     for index, b in enumerate(blobs_r):
-        read_red_sign(b, img, kpts_img)
+        sign_buffer = read_red_sign(b, img, kpts_img)
 
-    for index, b in enumerate(blobs_b):
-        read_blu_sign(b, img, kpts_img)
+    #for index, b in enumerate(blobs_b):
+        #sign_buffer = read_blu_sign(b, img, kpts_img)
+    return sign_buffer
diff --git a/nicla/traffic_light.py b/nicla/traffic_light.py
index 9499aea..f445690 100644
--- a/nicla/traffic_light.py
+++ b/nicla/traffic_light.py
@@ -32,6 +32,7 @@ def rgb2hsv(rgb):
 
 def traf_lights(imgTraffic):
+    original = imgTraffic.copy(copy_to_fb=True)
     img = imgTraffic.to_grayscale()
     for blob in img.find_blobs([(0, 60)], pixels_threshold=100):
         aspect = blob.h() / blob.w()
@@ -62,10 +63,10 @@ def traf_lights(imgTraffic):
     #print(("", "rood", "geel", "groen")[light_status])
 
     if light_status == 1:
-        return 0x06
-    elif light_status == 2:
         return 0x07
-    elif light_status == 3:
+    elif light_status == 2:
         return 0x08
+    elif light_status == 3:
+        return 0x09
     else:
         return 0x01
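The \roadConclusion added to doc/dui.md contrasts blobbing against a Hough
transform run on a perspective-corrected (bird's eye view) image. As a reference
for what that combination looks like off-robot, here is a minimal OpenCV sketch;
the function name, the trapezoid source points, and the Canny/Hough thresholds
are illustrative assumptions, not values taken from this repository.

import cv2
import numpy as np

def birds_eye_hough(frame):
    h, w = frame.shape[:2]
    # Perspective correction: map a trapezoid around the road ahead onto a
    # full-frame rectangle, so lane lines become near-vertical and parallel.
    src = np.float32([(w * 0.3, h * 0.5), (w * 0.7, h * 0.5), (w, h), (0, h)])
    dst = np.float32([(0, 0), (w, 0), (w, h), (0, h)])
    warped = cv2.warpPerspective(frame, cv2.getPerspectiveTransform(src, dst), (w, h))

    # Edge detection followed by a probabilistic Hough transform on the
    # corrected image; each returned segment is (x1, y1, x2, y2).
    edges = cv2.Canny(cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY), 50, 150)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=40,
                            minLineLength=30, maxLineGap=10)
    return warped, lines

Tuning the source trapezoid and the Hough thresholds per floor surface is
exactly the extra image processing the conclusion mentions, which is why the
simpler blobbing approach won out under the project's time constraints.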