FACE ATTENDANCE SYSTEM DEMONSTRATION :
STEP : 1
CHECK THE WEBCAM.
STEP : 2
CAPTURE FACES WITH THE WEBCAM & STORE THEM IN A FOLDER. (A SHORT SKETCH FOR STEPS 1 AND 2 FOLLOWS THIS LIST.)
STEP : 3
TRAIN ON THE STORED FACE IMAGES WITH AI.
STEP : 4
RECOGNISE FACES FROM THE WEBCAM WITH AI.
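Steps 1 and 2 are not covered by the main recognition script further below, so here is a minimal sketch of them. It assumes OpenCV is installed, that the camera is at index 0, and that captured photos are saved into the same ../userPhoto folder the recognition script reads from; the file name person_name.jpg and the 'c'/'q' key bindings are only illustrative choices, not part of the original system.

# STEP 1 & 2 sketch: check the webcam, then capture a face photo into a folder.
import os
import cv2

folder_path = '../userPhoto'   # same folder the recognition script loads from
os.makedirs(folder_path, exist_ok=True)

capture = cv2.VideoCapture(0)  # camera index 0 is an assumption
if not capture.isOpened():
    # STEP 1: the webcam check failed
    raise RuntimeError("Webcam not found or could not be opened.")

while True:
    ret, frame = capture.read()
    if not ret:
        break
    cv2.imshow('Capture Face - press c to save, q to quit', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('c'):
        # STEP 2: store the captured face image; the file name here is only an example.
        cv2.imwrite(os.path.join(folder_path, 'person_name.jpg'), frame)
    elif key == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()

Save one clear, front-facing photo per person; the recognition script uses the file name (without extension) as the person's name.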
Programmers, super users and AI creators face different types of difficulties and problems in the digital world. Below are some of the technical problems I faced and the tips and tricks I used to overcome and solve them.
Python Code Is Now Here : (Write and Test It with Proper Indentation.)
import cv2
import face_recognition
import os
import numpy as np

# Load the images from the folder
folder_path = '../userPhoto'
image_files = os.listdir(folder_path)

# Initialize arrays to store known face encodings, names and thumbnails
known_encodings = []
known_names = []
known_images = []

# Load the known face images and compute their encodings
for image_file in image_files:
    image_path = os.path.join(folder_path, image_file)
    image = face_recognition.load_image_file(image_path)
    face_locations = face_recognition.face_locations(image)
    if len(face_locations) > 0:
        encoding = face_recognition.face_encodings(image, face_locations)[0]
        known_encodings.append(encoding)
        known_names.append(os.path.splitext(image_file)[0])
        # Convert from RGB (face_recognition) to BGR (OpenCV) and resize for the thumbnail display
        known_images.append(cv2.resize(cv2.cvtColor(image, cv2.COLOR_RGB2BGR), (100, 100)))

# Initialize the webcam
video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame from the webcam
    ret, frame = video_capture.read()
    if not ret:
        break

    # Convert the frame to RGB for face recognition
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Detect faces in the frame
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

    # Iterate over detected faces
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # Compare the face with known encodings
        distances = face_recognition.face_distance(known_encodings, face_encoding)
        min_distance_index = np.argmin(distances)
        min_distance = distances[min_distance_index]

        if min_distance <= 0.45:  # Adjust the threshold as needed
            name = known_names[min_distance_index]
            accuracy = (1 - min_distance) * 100  # Calculate accuracy percentage
            thumbnail = known_images[min_distance_index]

            # Draw a rectangle around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)

            # Display the name and accuracy below the face rectangle
            text = f"{name}: {accuracy:.2f}%"
            cv2.putText(frame, text, (left, bottom + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)

            # Display the thumbnail image in the top right corner
            frame[10:110, frame.shape[1] - 110:frame.shape[1] - 10] = thumbnail
        else:
            name = "Unknown"

            # Draw a rectangle around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Display the name below the face rectangle
            cv2.putText(frame, name, (left, bottom + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)

    # Display the resulting frame
    cv2.imshow('Face Recognition', frame)

    # Quit the program if 'q' is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the webcam and close windows
video_capture.release()
cv2.destroyAllWindows()
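The script above only shows the recognised name on screen; an attendance system also needs to record it somewhere. Here is a minimal sketch of how a recognised name could be appended to a CSV file. The file name attendance.csv and the helper mark_attendance() are my own illustrative choices, not part of the code above; call the helper right after a face is matched in the loop.

import csv
import os
from datetime import datetime

def mark_attendance(name, csv_path='attendance.csv'):
    # Append one attendance row (name, date, time), skipping duplicates for the same day.
    today = datetime.now().strftime('%Y-%m-%d')
    now = datetime.now().strftime('%H:%M:%S')

    # Check whether this person is already marked present today
    if os.path.exists(csv_path):
        with open(csv_path, newline='') as f:
            for row in csv.reader(f):
                if len(row) >= 2 and row[0] == name and row[1] == today:
                    return  # already recorded today

    with open(csv_path, 'a', newline='') as f:
        csv.writer(f).writerow([name, today, now])

# Example: mark_attendance(name) inside the recognition loop, after a match.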
include "zklibrary.php";$zk = new ZKLibrary('192.168.1.7', 4370);$zk->connect();$zk->disableDevice();$users = $zk->getUser();
{$useridname[$key][0] = $user[0];$useridname[$key][1] = $user[1];}
$attendance = $zk->getAttendance();
<tr>
<td width="25">No</td><td>ID</td><td>Name</td><td>Role</td><td>Date</td><td>Time</td>
</tr>