The objective of this lab was to build on components from previous labs to accomplish navigation and planning, both in simulation and on the physical robot.
def add_line(self, currPoints, goalPoints):
    """Take one greedy step from currPoints toward goalPoints on the grid.

    Steps along the axis with the larger remaining delta (ties go to the
    row axis, as in the original). If the desired neighbor cell holds an
    obstacle (grid value 1), delegates to follow_wall with a direction
    code: 0 = +col blocked, 1 = -col blocked, 2 = +row blocked,
    3 = -row blocked.

    Points are (row, col) pairs. Returns the (row, col) to move to.
    """
    # Remaining signed distance to the goal on each axis.
    deltaX = goalPoints[1] - currPoints[1]  # column delta
    deltaY = goalPoints[0] - currPoints[0]  # row delta

    # Fix: already at the goal -- stay put. The original fell through to
    # the row branch and stepped away from the goal in this case.
    if deltaX == 0 and deltaY == 0:
        return currPoints[0], currPoints[1]

    if abs(deltaX) > abs(deltaY):
        # Step along the column axis in the direction of the goal.
        step = 1 if deltaX > 0 else -1
        target = (currPoints[0], currPoints[1] + step)
        wall_code = 0 if deltaX > 0 else 1
    else:
        # Step along the row axis in the direction of the goal.
        step = 1 if deltaY > 0 else -1
        target = (currPoints[0] + step, currPoints[1])
        wall_code = 2 if deltaY > 0 else 3

    if self.grid[target[0], target[1]] == 1:  # obstacle that way
        return self.follow_wall(currPoints, goalPoints, wall_code)
    return target
def follow_wall(self, currPoints, goalPoints, moveDir):
    """Naive wall follower for when the preferred move is blocked.

    Given the blocked direction code in moveDir, pick the best open
    neighbor, preferring moves that still approach the goal and falling
    back to moving away from it only when nothing else is open.

    Only the moveDir == 0 case (blocked while trying to move toward a
    larger column) is shown here; the other codes are handled analogously
    elsewhere, so this returns None for them.
    """
    row, col = currPoints[0], currPoints[1]
    goal_row_delta = goalPoints[0] - row  # > 0 means the goal is below us

    # Occupancy of the four neighbor cells (1 == obstacle). All four are
    # read up front, matching the original's access pattern.
    blocked_left = self.grid[row, col - 1] == 1
    blocked_right = self.grid[row, col + 1] == 1  # unused for moveDir 0
    blocked_up = self.grid[row - 1, col] == 1
    blocked_down = self.grid[row + 1, col] == 1
    ############################################################
    if moveDir == 0:  # wanted to move right (+col), wall in the way
        if goal_row_delta > 0:
            # Goal is below: try down, then left, else retreat upward.
            if not blocked_down:
                return row + 1, col
            if not blocked_left:
                return row, col - 1
            return row - 1, col
        # Goal is above (or level): try up, then left, else retreat down.
        if not blocked_up:
            return row - 1, col
        if not blocked_left:
            return row, col - 1
        return row + 1, col
Wall following is very naive: it first calculates where the walls are in relation to the current position, and then, knowing the direction the robot would ideally like to move in, picks the next best option. For instance, if the robot would like to move left but that cell is blocked, and moving down would also bring it closer to the goal, it moves down instead. This algorithm deprioritizes, but still allows, moving away from the goal if that is the only option. Below are several examples of the algorithm working. I did not originally have a large obstacle that would obstruct many paths, so I later added a false obstacle to ensure the wall following was working as intended.
# Map a planner grid cell (row, col) to simulator world coordinates.
# The scale/offset constants are tuned to the simulator map
# (presumably row -> y and col -> x; TODO confirm against the map bounds).
stepY = (interm_steps[i+1][0]*-.79/16)*2;
stepX = (interm_steps[i+1][1]*(2.05/23)-1.85)*2;
Once I had done that, I could visualize these points by entering them iteratively as ground truth points on the simulator, which immediately worked very well.
# Reset the simulator and the plotter before replaying the planned path.
robot.reset()
loc.plotter.reset_plot()

last = len(interm_steps) - 1
for i, cell in enumerate(interm_steps):
    # Convert the planner grid cell to simulator world coordinates
    # (same scale/offset as the step transform above).
    nextStepY = (cell[0] * -0.79 / 16) * 2
    nextStepX = (cell[1] * (2.05 / 23) - 1.85) * 2
    loc.plotter.plot_point(nextStepX, nextStepY, GT)
    if i != last:  # fix: was `i is not ...` -- identity comparison on ints
        stepY = (interm_steps[i + 1][0] * -0.79 / 16) * 2
        stepX = (interm_steps[i + 1][1] * (2.05 / 23) - 1.85) * 2
        # Drive the simulated robot from this waypoint to the next one,
        # then plot where it actually ended up for comparison.
        move_to_next((nextStepX, nextStepY, 0), (stepX, stepY, 0))
        loc.plotter.plot_point(robot.get_gt_pose()[0], robot.get_gt_pose()[1], ODOM)
As you can see in the above code, I have a move_to_next script that actually moves the simulated robot from point to point. It uses closed loop feedback to analyze where the robot is in x, y, and theta space, and moves accordingly. For instance, in a left move, the robot will turn until the angle is 180 degrees, then it will continue slowly forward until the gt position is close enough to what the next point is supposed to be. This is one direction; the others are very similar.
def move_to_next(currPos, nextPos):
    """Drive the simulated robot from currPos to nextPos using closed-loop
    feedback on the ground-truth pose; positions are (x, y, theta) tuples.

    Only the leftward move is shown here; the other three directions are
    handled analogously in the full script.
    """
    if currPos[0] > nextPos[0]:  # target x is smaller -> moving left
        print("moving left")
        target_angle = 180  # face the -x direction

        # Rotate in place until the ground-truth heading reads 180 degrees.
        robot.set_vel(0, .1)
        while round(robot.get_gt_pose()[2]) != target_angle:
            pass  # busy-wait on pose feedback

        # Creep forward until the ground-truth x matches the waypoint
        # (compared at one-decimal precision).
        robot.set_vel(.1, 0)
        while round(robot.get_gt_pose()[0], 1) != round(nextPos[0], 1):
            pass

        robot.set_vel(0, 0)  # stop at the waypoint
As seen in the following videos, this code worked great and I was able to watch the robot move from point to point. Also included is an image of where the robot is moving vs. where it's supposed to be moving, which is very close. The robot moved very slowly, so this video is sped up 8x.
// Main Arduino loop: walk the precomputed movePoints path one step at a time.
void loop()
{
  setMotors(0, 0);  // make sure the robot starts from rest

  // Visit waypoints pairwise: each step needs rows i and i+1, so stop at the
  // penultimate row. Fixes the original, which used Python's len() (not valid
  // C++) and ran i to the last row, reading movePoints[i+1] out of bounds.
  const int numPoints = sizeof(movePoints) / sizeof(movePoints[0]);
  for (int i = 0; i < numPoints - 1; i++) {
    // args: step index, currX, currY, nextX, nextY (column is X, row is Y)
    moveToPoint(i, movePoints[i][1], movePoints[i][0],
                movePoints[i + 1][1], movePoints[i + 1][0]);
  }
}
/// Returns the current gyro-integrated yaw, in degrees, normalized to [0, 360).
int getYaw(){
  // Block until the IMU has a fresh sample.
  while(not(myICM.dataReady())){}

  //Read yaw data
  myICM.getAGMT(); // The values are only updated when you call 'getAGMT'

  // Integrate the gyro z rate into the yaw estimate. Fix: divide by 1000.0 so
  // elapsed time is fractional seconds -- the original's integer division by
  // 1000 truncated dt to 0 whenever reads were less than 1 s apart, freezing
  // the yaw estimate. (NOTE(review): confirm `dt` itself is declared float.)
  dt = (millis() - lastRead) / 1000.0;
  lastRead = millis();
  yaw_g = yaw_g + myICM.gyrZ() * dt;

  // Normalize into [0, 360) and actually return the normalized value.
  // Fixes two bugs: the original loop recomputed `normyaw = yaw_g + 360`
  // without ever changing its operand (an infinite loop for yaw_g < -360),
  // and then returned the raw yaw_g anyway, making the normalization dead code.
  float normyaw = yaw_g;
  while(normyaw < 0){ normyaw += 360; }
  while(normyaw >= 360){ normyaw -= 360; }
  return int(normyaw);
}
// Set both drive motors to the given signed drive levels.
// motor1 goes to driver channel 1 and motor2 to channel 0; the middle
// argument is passed as 1 on both calls (presumably the direction flag of
// the SCMD setDrive(channel, direction, level) API -- TODO confirm).
void setMotors(int motor1, int motor2){
  myMotorDriver.setDrive( 1, 1, motor1);
  myMotorDriver.setDrive( 0, 1, motor2);
}
int getDist(){
distanceSensor.startRanging(); //Write configuration bytes to initiate measurement
distance = distanceSensor.getDistance(); //Get the result of the measurement from the sensor
distanceSensor.clearInterrupt();
distanceSensor.stopRanging();
//Serial.print("Distance(mm): ");
//Serial.println(distance);
return distance;
}
Then, I created the main program. It essentially does the same thing as in my sim: it determines where (up, down, left, or right) the next point is, then turns the robot to face in that direction and moves until it has gone far enough to cover one "block," which I initially set to 0.15 m. Below is an example of one of the directions; it is repeated similarly for the others.
// Drive the robot from grid point (currX, currY) to the adjacent grid point
// (nextX, nextY). Only the "moving left" case is shown in this excerpt; the
// other three directions are handled analogously.
void moveToPoint(int i, int currX, int currY, int nextX, int nextY){
  // Fix: the source contained the garbled token `&lf;` here. Restored to `>`,
  // matching the simulator version where moving left means x decreases.
  if(currX > nextX){ //moving left
    Serial.print("moving left");
    nextAng = 180;

    ///////Turn left///////
    setMotors(motorSpeed, -motorSpeed); // spin in place
    while(currAngle != nextAng){ // NOTE(review): exact equality can be skipped
      currAngle = getYaw();      // if yaw jumps past 180 between 10 ms polls;
      delay(10);                 // a tolerance band would be more robust
    } //wait till it gets to the right angle
    Serial.println("angle correct");

    ///////Drive one block///////
    startDist = getDist();
    // Fix: reset currDist before the loop. A stale currDist left over from a
    // previous move could satisfy the exit condition immediately.
    currDist = startDist;
    setMotors(motorSpeed, motorSpeed);
    while(currDist >= startDist - blockDistX){
      currDist = getDist();
    } //wait till it gets to the right distance
    setMotors(0, 0);
  }
}
// Timed pivot toward "left". dirStatus encodes the heading from the previous
// move (presumably 0=left, 1=up, 2=right, 3=down -- TODO confirm), so the
// number of timed quarter turns depends on where the robot was pointing.
setMotors(motorSpeed, 0); // pivot on one wheel; spinTime ~ one 90-degree turn
if(dirStatus == 1) { delay(spinTime*3);} //were up, going left
else if(dirStatus == 2) { delay(spinTime*2);} //were right, going left
else if(dirStatus == 3) { delay(spinTime);} //were down, going left
dirStatus = 0; // now facing left
spinTime was a variable that I tuned to produce approximately a 90-degree turn. In order to get my robot to turn properly, I did end up having to put masking tape on the wheels, which made them slippery enough to pivot in place.
Once this was working — which I confirmed by printing to the serial console with the motors disengaged — I would move the robot forward one square, again by setting the motors to a constant speed and waiting for a constant, empirically tuned delay.