import cv2
import numpy as np
from IPython.display import clear_output
import IPython

# comunication from Python to arduino
import serial, time
import struct

# Open the serial link to the Arduino.
# NOTE(review): port 'COM6' and the 1 Mbaud rate are machine-specific —
# confirm they match the Arduino sketch and the host OS port name.
arduino = serial.Serial('COM6', 1000000, timeout=.1)
time.sleep(1)  # give the connection a second to settle
arduino.write(("Hello from Python!").encode())  # one-time handshake/greeting

# Set camera resolution.
cameraWidth = 640
cameraHeight = 480

# Haar-cascade classifier for frontal faces; the XML path is relative to the
# current working directory, so run the script from the project root.
face_classifier = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')

# calculate the middle screen (the point the servos try to keep the face on)
midScreenX = int(cameraWidth / 2)
midScreenY = int(cameraHeight / 2)

# NOTE(review): midScreenWindow is not referenced anywhere in the visible
# code — presumably a dead-zone half-width in pixels; possibly unused.
midScreenWindow = 1

def printArduino2Python(q1, q2):
    """Send the two angle strings to the Arduino and echo its reply.

    Parameters
    ----------
    q1, q2 : str
        Pan and tilt angles (radians, pre-truncated to short strings).
        Sent as one logical message of the form "<q1>and<q2>"; the Arduino
        sketch is expected to split on the literal "and".
    """
    arduino.write(q1.encode())
    arduino.write('and'.encode())
    arduino.write(q2.encode())

    time.sleep(0.05)  # give the Arduino a moment to process and respond

    # Echo up to two reply lines from the Arduino (the original duplicated
    # this stanza verbatim; a loop keeps the two reads identical).
    for _ in range(2):
        raw = arduino.readline()
        if raw:
            # Decode the bytes directly instead of str(raw) plus replace()
            # chains: the old approach stripped EVERY 'b' character from the
            # payload, not just the bytes-literal prefix "b'...'".
            print(raw.decode('ascii', errors='ignore').strip())

    print("-------------------------")





def face_detector(img):
    """Detect faces in *img*, annotate them, and return the face centre.

    Parameters
    ----------
    img : numpy.ndarray
        BGR webcam frame; rectangles/circles are drawn on it in place.

    Returns
    -------
    tuple
        ``(img, localization)`` where *localization* is ``[xCentre, yCentre]``
        of the LAST detected face, or ``[]`` when no face is found.
    """
    # Convert image to grayscale (Haar cascades operate on gray images).
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Classify faces from the gray image.
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)

    # 'faces is ()' compared identity and is False for the numpy array that
    # modern OpenCV returns, which left 'localization' unbound on empty
    # frames (NameError).  A length check handles tuple and ndarray alike.
    if len(faces) == 0:
        return img, []

    localization = []
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)
        # Midpoint of the face box.  The vertical centre previously used the
        # box WIDTH (y + w/2); it must use the height.
        xCentre = int(x + (w / 2))
        yCentre = int(y + (h / 2))
        cv2.circle(img, (xCentre, yCentre), 5, (0, 255, 255), -1)
        localization = [xCentre, yCentre]

    return img, localization

# Open Webcam.
cap = cv2.VideoCapture(1)  # 0 is camera device number, 0 is for internal webcam and 1 will access the first connected usb webcam

# Set camera resolution. The max resolution is webcam dependent
# so change it to a resolution that is both supported by your camera
# and compatible with your monitor.
# NOTE(review): cap.set() can silently fail if the camera does not support
# 640x480 — the crosshair/midpoint math below assumes it succeeded; verify.
cap.set(cv2.CAP_PROP_FRAME_WIDTH, cameraWidth)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cameraHeight)

while True:

    # Capture frame-by-frame.  'ret' was previously ignored: a dropped frame
    # makes 'frame' None and cv2.flip() crashes — skip such iterations.
    ret, frame = cap.read()
    if not ret:
        continue

    # Mirror the frame so on-screen motion matches the viewer's movement.
    frame = cv2.flip(frame, 1)

    image, face = face_detector(frame)

    # Crosshair through the screen centre (the point the servos aim for).
    # cv2.line(image, start, end, color, thickness)
    # vertical line
    cv2.line(image, (midScreenX, 0), (midScreenX, cameraHeight), (255, 127, 0), 2)
    # horizontal line
    cv2.line(image, (0, midScreenY), (cameraWidth, midScreenY), (255, 127, 0), 2)

    # Only act when a face was recognised (face == [xCentre, yCentre]).
    if len(face) == 2:
        midFaceX = face[0]
        midFaceY = face[1]

        a = 1800  # focal-length-like scale; adjust according to the person's distance

        # Pixel error between the face centre and the screen centre.
        dx = midFaceX - midScreenX
        dy = midScreenY - midFaceY

        # Only send a correction when there is an error on both axes.
        # (Logical 'and' replaces the bitwise '&' of the original; arctan
        # itself cannot divide by zero here since 'a' is a constant.)
        if dy != 0 and dx != 0:
            # Small-angle corrections for the pan (q1) and tilt (q2) servos.
            q1 = np.arctan(dx / a)
            q2 = np.arctan(dy / a)

            q1Str = str(q1)[0:5]  # truncate to 5 characters for the serial protocol
            print("q1:", q1Str)
            print("q1deg:", np.degrees(q1))

            q2Str = str(q2)[0:5]  # truncate to 5 characters for the serial protocol
            print("q2:", q2Str)
            print("q2deg:", np.degrees(q2))

            printArduino2Python(q1Str, q2Str)

            # Overlay the commanded angles (degrees) on the preview frame.
            cv2.putText(image, "q1: " + str(np.degrees(q1)), (250, 400), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
            cv2.putText(image, "q2: " + str(np.degrees(q2)), (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

        cv2.putText(image, "dx: " + str(dx), (0, 400), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(image, "dy: " + str(dy), (0, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

    # Display the annotated frame.  ('image' and 'frame' alias the same
    # array, but showing 'image' makes the intent explicit.)
    cv2.imshow('Video', image)

    # Press Q on keyboard to exit.
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

# When everything is done, release the capture and clean up BEFORE shutting
# the kernel down.  The original commented-out release referenced a
# nonexistent 'video_capture' variable, and cv2.destroyAllWindows() ran after
# do_shutdown(), so it might never execute.
cap.release()
cv2.destroyAllWindows()
app = IPython.Application.instance()
app.kernel.do_shutdown(True)  # turn off kernel (script runs inside IPython)
clear_output()  # clear output