Vision-Based Line Following¶
In this chapter we explain how to control the car's motion with vision-based line following.
The image below is a raw frame captured by the camera mounted on the front of the car. Our goal is to keep the car moving between the two yellow lines.
To do that we need to detect the yellow lane lines and, ideally, draw a guide line that indicates the steering direction. The final result looks like this:
Color-Based Extraction¶
In this example the lane lines are yellow, so we can extract them with the HSV color-range approach introduced earlier. The extracted result is shown in the figure below.
def fetch_yellow(img):
    # Convert to HSV and keep only the pixels that fall inside the yellow range
    hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    low = (25, 46, 46)
    upper = (35, 255, 255)
    binary = cv.inRange(hsv_img, low, upper)
    if DEBUG:
        cv.imshow("binary color", binary)
    return binary
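The HSV range above (hue 25-35, saturation and value 46-255) matches this particular mat and lighting; under different lighting it usually needs re-tuning. A small, hypothetical tuning sketch using OpenCV trackbars is shown below (the window and slider names are made up and are not part of the original code):

import cv2 as cv

def nothing(_):
    pass

# Move the sliders until only the lane lines remain white in the mask.
cv.namedWindow("tune")
for name, init, max_val in [("H_low", 25, 179), ("H_high", 35, 179),
                            ("S_low", 46, 255), ("V_low", 46, 255)]:
    cv.createTrackbar(name, "tune", init, max_val, nothing)

capture = cv.VideoCapture(0)
while True:
    ok, frame = capture.read()
    if not ok:
        break
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    low = (cv.getTrackbarPos("H_low", "tune"),
           cv.getTrackbarPos("S_low", "tune"),
           cv.getTrackbarPos("V_low", "tune"))
    upper = (cv.getTrackbarPos("H_high", "tune"), 255, 255)
    cv.imshow("tune", cv.inRange(hsv, low, upper))
    if cv.waitKey(1) & 0xFF == ord('q'):
        break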
Cropping the ROI¶
Looking at the result above, the area far away from the car is not actually useful.
To move, the car only needs to watch the track right in front of it, not far into the distance. Based on this observation, we extract a region of interest (ROI) and keep only the near part of the image.
def fetch_roi(img):
    # Keep only the bottom half of the image as the region of interest
    height, width = img.shape[0:2]
    roi = img[int(height/2):height, 0:width]
    # Apply a closing operation to fill small gaps in the mask
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    roi = cv.morphologyEx(roi, cv.MORPH_CLOSE, kernel, iterations=3)
    # Keep only contours large enough to be lane lines
    # (OpenCV 3 returns three values here; OpenCV 4 returns only contours and hierarchy)
    _, contours, _ = cv.findContours(roi, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    dst = np.zeros_like(roi)
    for i, c in enumerate(contours):
        area = cv.contourArea(c)
        if area > 200:
            cv.drawContours(dst, contours, i, 255, -1)
    if DEBUG:
        cv.imshow("roi", dst)
    return dst
Edge Extraction¶
The previous result was obtained purely by color, so some noise may have slipped in. Here we use Canny edge detection to detect the edges of the lane lines.
Because the lane lines differ clearly in color from the ground, they are guaranteed to produce edges.
def fetch_edge(img):
    # Canny edge detection with low/high thresholds of 200/400
    canny = cv.Canny(img, 200, 400)
    if DEBUG:
        cv.imshow("canny", canny)
    return canny
Finding the Lines¶
In the steps above we first extracted the yellow pixels, then cropped the region of interest, and finally extracted the edges inside that region. In this step we look for the line segments in view that could be lane lines.
def make_points(line_kb, height):
    # Average the slopes and intercepts of all segments on one side
    lines_avg_kb = np.average(line_kb, axis=0)
    # The ROI is the bottom half of the frame, so its rows run from 0 to height/2.
    # Solve x = (y - b) / k at the bottom and the top of the ROI.
    y1 = height * 0.5
    x1 = (y1 - lines_avg_kb[1]) / lines_avg_kb[0]
    y2 = 0
    x2 = (y2 - lines_avg_kb[1]) / lines_avg_kb[0]
    # Shift y back into full-image coordinates by adding the cropped half height
    return [x1, y1 + 0.5*height, x2, y2 + 0.5*height]
def fetch_lines(img, height, width):
    # Probabilistic Hough transform on the edge image
    lines = cv.HoughLinesP(img, 1, np.pi/180, 10, minLineLength=40, maxLineGap=5)
    left_kb = []
    right_kb = []
    if lines is None:
        return []
    boundary = 1.0 / 3
    left_region_boundary = width * (1 - boundary)  # left lane line segment should be on left 2/3 of the screen
    right_region_boundary = width * boundary       # right lane line segment should be on right 2/3 of the screen
    # Compute the slope and intercept of every segment
    for line in lines:
        x1, y1, x2, y2 = line[0]
        # Skip vertical segments, whose slope is undefined
        if x1 == x2:
            continue
        # Fit y = kx + b to the segment's two endpoints
        params = np.polyfit((x1, x2), (y1, y2), 1)
        # slope
        k = params[0]
        # intercept
        b = params[1]
        if k < 0:
            if x1 < left_region_boundary and x2 < left_region_boundary:
                left_kb.append((k, b))
        else:
            if x1 > right_region_boundary and x2 > right_region_boundary:
                right_kb.append((k, b))
    lines = []
    if len(left_kb) > 0:
        left_line = make_points(left_kb, height)
        lines.append(left_line)
    if len(right_kb) > 0:
        right_line = make_points(right_kb, height)
        lines.append(right_line)
    return lines
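One detail worth spelling out: image y coordinates grow downward, which is why segments with a negative slope are assigned to the left lane line and segments with a positive slope to the right one. A small illustrative check (the endpoint values below are made up):

import numpy as np

# A left lane line runs from the bottom-left toward the top-center of the
# screen, so y decreases as x increases and the fitted slope is negative;
# the mirror-image right lane line gets a positive slope.
left_segment = ((50, 400), (300, 250))     # (x1, y1), (x2, y2), illustrative
right_segment = ((600, 400), (350, 250))

for (x1, y1), (x2, y2) in (left_segment, right_segment):
    k, b = np.polyfit((x1, x2), (y1, y2), 1)
    print(k, b)   # left: k ~ -0.6, right: k ~ +0.6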
Computing the Guide Line¶
def calc_head_line(lines, width, height):
    if len(lines) == 0:
        return None
    # The guide line starts from the bottom center of the frame
    x = width / 2
    y = height
    if len(lines) == 1:
        # Only one lane line detected: keep the guide line parallel to it
        # (alternatively, one could re-fit y = kx + b to the line and solve for x_head)
        x1, y1, x2, y2 = lines[0]
        x_head = x2 + (x - x1)
        y_head = 0.5 * height
        return [x, y, x_head, y_head]
    else:
        # Two lane lines detected: aim at the midpoint between their far endpoints
        _, _, line1_x2, _ = lines[0]
        _, _, line2_x2, _ = lines[1]
        x_head = (line1_x2 + line2_x2) / 2
        y_head = 0.5 * height
        return [x, y, x_head, y_head]
For easier visualization we also wrote a function that draws all of the lines.
def show_lines(lines, frame, head_line=None):
    if DEBUG:
        try:
            # Draw the lane lines in green and the guide line in red on an overlay
            line_img = np.zeros_like(frame)
            for x1, y1, x2, y2 in lines:
                cv.line(line_img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 10)
            if head_line:
                x1, y1, x2, y2 = head_line
                cv.line(line_img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 10)
            # Blend the overlay onto the original frame
            dst = cv.addWeighted(line_img, 0.8, frame, 1, 0)
            cv.imshow("dst", dst)
        except Exception as e:
            print(e)
Computing the Deviation Angle¶
To make it easier to control how much the car should turn, we compute the angle by which the guide line deviates from the vertical center line of the screen. The farther the guide line deviates from the center, the larger the angular velocity should be, and vice versa. This also lays the groundwork for the vision-based PID guidance that follows.
def calc_angle(x_offset, y_offset):
    # Angle (in radians) between the guide line and the vertical center line
    angle_to_mid_radian = math.atan(x_offset / y_offset)
    return angle_to_mid_radian
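As a quick sanity check of the geometry (the numbers below are made up): for a 640x480 frame whose guide line ends 100 pixels to the right of the screen center, measured over the lower half of the frame:

import math

x_offset = 100          # guide-line endpoint minus screen center, in pixels (illustrative)
y_offset = 480 * 0.5    # vertical extent used for the angle, in pixels

angle = math.atan(x_offset / y_offset)
print(angle)                 # ~0.39 rad
print(math.degrees(angle))   # ~22.6 degrees, leaning to the right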
Vision-Based PID Control of the Car¶
In the previous section we computed the car's deviation angle; in this section we use that angle to dynamically control the car's angular velocity.
We assume that when the deviation from the center line is 0, the car should drive straight ahead.
If the red guide line tilts to the left, the car should turn left, so the angular velocity is positive.
If the red guide line tilts to the right, the car should turn right, so the angular velocity is negative.
Moreover, the larger the deviation angle, the larger the angular velocity should be, and vice versa. This is exactly the kind of behavior a PID controller provides. With that in mind, we run a vision-based PID controller on the host computer; the goal is to keep the deviation angle close to 0.
integral = 0
derivative = 0
prev_error = 0
pid = 0

def pid_control(curr, target):
    global integral, derivative, prev_error, pid
    error = target - curr
    # Accumulate the error for the integral term
    integral += error
    # The derivative term uses the change in error between two frames
    derivative = error - prev_error
    # PID formula: output = kp*e + ki*sum(e) + kd*de
    kp = 0.2
    ki = 0.0
    kd = 0.02
    pid = kp*error + ki*integral + kd*derivative
    prev_error = error
    print(pid)
    return pid
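To get a feel for how the controller reacts, here is a small, purely illustrative run (the deviation angles are made up; the gains are the ones above):

# Hypothetical deviation angles (radians) seen over a few consecutive frames.
for angle in [0.30, 0.20, 0.10, 0.00, -0.10]:
    pid_control(angle, 0)
# Printed outputs are roughly -0.066, -0.038, -0.018, 0.002, 0.022: the command
# is roughly opposite in sign to the deviation, so a guide line that leans
# right (positive angle) produces a negative, right-turn command.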
Next, we change the code above to read frames from the camera so the car can be driven by the PID controller. The corresponding example code is as follows:
if __name__ == '__main__':
    # Give the node a name
    rospy.init_node('heima_linefollow', anonymous=False)
    # Set rospy to execute a shutdown function when terminating the script
    rospy.on_shutdown(shutdown)
    # Control loop rate (Hz)
    rate = rospy.Rate(100)
    # Publisher to control the robot's speed
    cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
    capture = cv.VideoCapture(0)
    ok, frame = capture.read()
    height, width = frame.shape[0:2]
    while not rospy.is_shutdown():
        ok, frame = capture.read()
        # Extract the yellow pixels
        binary = fetch_yellow(frame)
        # Crop the region of interest
        roi = fetch_roi(binary)
        # Extract the edges
        canny = fetch_edge(roi)
        # Find the lane lines
        lines = fetch_lines(canny, height, width)
        head_line = calc_head_line(lines, width, height)
        if head_line:
            mid = width / 2
            _, _, curr, _ = head_line
            x_offset = curr - mid
            y_offset = height * 0.5
            angle = calc_angle(x_offset, y_offset)
            # Ignore frames whose deviation exceeds 60 degrees (the angle is in radians)
            if angle > math.radians(60) or angle < -math.radians(60):
                continue
            show_lines(lines, frame, head_line)
            value = pid_control(angle, 0)
            print("x_offset:{},{},{},angle={}".format(x_offset, mid, curr, angle))
            # Keep the linear velocity constant; treat the PID output as a steering
            # angle and convert it to an angular velocity with R = wheelbase / tan(steer)
            V = 0.10
            if abs(value) < 1e-6:
                W = 0.0
            else:
                R = 0.1445 / math.tan(value)
                W = V / R
            twist = Twist()
            twist.linear.x = V
            twist.angular.z = W
            cmd_vel.publish(twist)
        cv.imshow("frame", frame)
        cv.waitKey(1)
        rate.sleep()
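A brief note on the last conversion: the code treats the PID output as a steering angle and maps it to an angular velocity with a bicycle-model style relation, R = L / tan(steer) and W = V / R, where 0.1445 appears to be the car's wheelbase in meters (an assumption read off the code, not stated in the text). A minimal sketch of that conversion, with a guard for the straight-ahead case:

import math

def steer_to_angular_velocity(steer, v=0.10, wheelbase=0.1445):
    # Hypothetical helper: turning radius R = wheelbase / tan(steer),
    # angular velocity W = v / R = v * tan(steer) / wheelbase.
    if abs(steer) < 1e-6:
        return 0.0                     # driving straight: no rotation
    radius = wheelbase / math.tan(steer)
    return v / radius

print(steer_to_angular_velocity(0.066))    # ~0.046 rad/s, turn left
print(steer_to_angular_velocity(-0.066))   # ~-0.046 rad/s, turn right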