UR Robot + Webcam: Move to Follow a Detected Object by Color
Python + OpenCV + Webcam -> Control a Dual-Arm UR Robot via TCP/IP Socket
The UR Robot is controlled by finding the x, y position (in pixels) of the detected object in each frame captured from the webcam with OpenCV.
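As a quick connectivity check, a single URScript movej command can be sent over the port-30003 socket before running the full script. The sketch below is only illustrative; the IP address and pose values are copied from the full listing and will need adjusting for your robot:

# Minimal connectivity test: send one URScript movej command to the UR controller.
# Assumes the controller is reachable at 192.168.1.9 and that port 30003
# (the real-time client interface) accepts URScript strings, as in the full script.
import socket

HOST = "192.168.1.9"   # UR controller IP (adjust to your network)
PORT = 30003           # real-time client interface

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))

# Target pose p[x, y, z, rx, ry, rz] in metres / radians (same home pose as control.move(0, 0) below)
cmd = "movej(p[0.040, -0.499, 0.331, 1.64, 0.7, -1],a=0.4,v=4)\n"
s.send(cmd.encode('utf-8'))
s.close()

If the robot moves to this pose, the socket path is working and the vision loop in the full script can take over.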
Python code:
import socket
import time
import cv2
import numpy as np
from shapedetector import ShapeDetector


class control():
    @staticmethod
    def move(x, z):
        # Convert the pixel offsets into a TCP pose (metres) and send a movej URScript command.
        x = str((x + 40) / 1000.0)   # x offset (pixels/2) treated as mm -> m, plus a 40 mm correction
        y = str(-499 / 1000.0)       # y is fixed at -0.499 m
        z = str(z + 331 / 1000.0)    # z offset is passed already scaled down (zPos/2000); add the 0.331 m base height
        ro = str(1.64)               # fixed tool orientation (rx, ry, rz)
        pi = str(0.7)
        ya = str(-1)
        print(x, y, z, ro, pi, ya)
        mm = "movej(p[" + x + "," + y + "," + z + "," + ro + "," + pi + "," + ya + "],a=0.4,v=4)" + "\n"
        s.send(mm.encode('utf-8'))
        # self.s.close()


HOST = "192.168.1.9"   # The remote host (UR controller)
PORT = 30003           # The same port as used by the server (real-time interface)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
control.move(0, 0)     # move to the home pose first

cap = cv2.VideoCapture(0)
count = 0
while (1):
    _, frame = cap.read()
    img = frame
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Threshold the orange colour range in HSV
    InRange_L = np.array([0, 180, 87], dtype=np.uint8)
    InRange_H = np.array([42, 255, 255], dtype=np.uint8)
    orange = cv2.inRange(hsv, InRange_L, InRange_H)
    mask = cv2.bitwise_and(frame, frame, mask=orange)

    gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((5, 5), np.uint8)
    gray = cv2.dilate(gray, kernel, iterations=3)
    # cv2.imshow('gray', gray)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, 50, 250, cv2.THRESH_BINARY)[1]
    cv2.imshow('T', thresh)

    # Find contours (OpenCV 3.x API: returns image, contours, hierarchy)
    image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    cv2.drawContours(cnt, contours, -1, (0, 255, 0), 2)
    sd = ShapeDetector()   # shape classification is available but not used below
    dst = img
    xPos = 0
    zPos = 0

    # Find the center of each detected object
    for c in range(len(contours)):
        M = cv2.moments(contours[c])
        rect = cv2.minAreaRect(contours[c])
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        angle = int(rect[2])   # Box2D gives (x, y), (width, height), theta
        cv2.drawContours(dst, [box], 0, (0, 0, 255), 2)
        # shape = sd.detect(c)
        cx = 0
        cy = 0
        if (M['m00'] != 0):
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
        xPos = 320 - cx    # offset from the image center of a 640x480 frame
        zPos = 240 - cy
        cv2.circle(dst, (cx, cy), 7, (0, 0, 0), -1)
        Pos = str(cx) + ',' + str(cy) + ',' + str(angle)
        cv2.putText(dst, Pos, (cx - 30, cy - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

    count += 1
    if (xPos != 0):
        print(zPos)
        if (count >= 25):
            # Send a new target roughly once every 25 frames
            control.move(xPos / 2, zPos / 2000)
            count = 0
            # time.sleep(0.1)

    # cv2.imshow('frame', frame)
    # cv2.imshow('yellow', orange)
    # cv2.imshow('mask', mask)
    cv2.imshow('img', img)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:    # Esc: close the socket and exit
        s.close()
        break

cv2.destroyAllWindows()
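The HSV bounds InRange_L / InRange_H above were tuned for an orange object under particular lighting, so they usually need re-tuning. A small, hypothetical helper (not part of the original script) that uses OpenCV trackbars can make it easier to find suitable bounds interactively:

# Helper sketch for tuning the HSV range used by cv2.inRange above.
# Window and trackbar names are arbitrary; initial values match the script.
import cv2
import numpy as np

def nothing(x):
    pass

cap = cv2.VideoCapture(0)
cv2.namedWindow('tune')
for name, init, maxval in [('H_lo', 0, 179), ('S_lo', 180, 255), ('V_lo', 87, 255),
                           ('H_hi', 42, 179), ('S_hi', 255, 255), ('V_hi', 255, 255)]:
    cv2.createTrackbar(name, 'tune', init, maxval, nothing)

while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lo = np.array([cv2.getTrackbarPos(n, 'tune') for n in ('H_lo', 'S_lo', 'V_lo')], dtype=np.uint8)
    hi = np.array([cv2.getTrackbarPos(n, 'tune') for n in ('H_hi', 'S_hi', 'V_hi')], dtype=np.uint8)
    cv2.imshow('tune', cv2.inRange(hsv, lo, hi))   # white pixels = inside the current range
    if cv2.waitKey(5) & 0xFF == 27:   # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()

Once the mask shows only the target object, copy the slider values back into InRange_L and InRange_H in the main script.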