train.py
import cv2
import os
import numpy as np
from model import getRecognitionModel
import random
# Directory where preprocessed images are saved
preprocessed_dir = '/Users/venkat/Desktop/UCLA_CS/Summer_projects/FaceAuth/Data/preprocessed_data'
# Initialize the face recognizer
face_recognizer = getRecognitionModel()
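# Note (assumption): getRecognitionModel() is defined in model.py and is assumed
# here to return an OpenCV LBPH recognizer (cv2.face.LBPHFaceRecognizer_create()).
# That matters below: among OpenCV's face recognizers, only LBPH supports
# update(), which this script relies on for chunked training; the Eigen and
# Fisher recognizers raise an error on update().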
# Number of images loaded and trained on per chunk; adjust as needed
chunk_size = 100
# Subset size (0 to use all images)
# subset_size = 100
# Resize dimensions (smaller images = smaller model)
resize_dim = (200, 200)
model_save_path = '/Users/venkat/Desktop/UCLA_CS/Summer_projects/FaceAuth/Weights/face_recognizer_model.yml'
# Delete any previously saved model so each run trains from scratch
if os.path.exists(model_save_path):
    os.remove(model_save_path)
    print(f"File '{model_save_path}' has been deleted.")
else:
    print(f"File '{model_save_path}' does not exist.")
def get_subset(data, subset_size):
    """
    Returns a subset of the data while ensuring the last three data points are included.

    Parameters:
        data (list): The original dataset.
        subset_size (int): The size of the desired subset (including the last three data points).

    Returns:
        list: The subset of the data.
    """
    # Ensure subset_size is at least 3
    if subset_size < 3:
        raise ValueError("Subset size must be at least 3 to include the last three data points.")

    # The number of random samples to select
    num_random_samples = subset_size - 3

    # Select random samples from the dataset, excluding the last three data points
    random_samples = random.sample(data[:-3], num_random_samples)

    # Add the last three data points to the subset
    subset = random_samples + data[-3:]
    return subset
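# Worked example (hypothetical data): get_subset(list(range(10)), 5) returns two
# elements sampled from 0..6 plus [7, 8, 9], so the newest three samples always
# survive subsetting. (This helper is currently unused; see the commented-out
# calls in load_chunk below.)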
def load_chunk(chunk_start, chunk_end):
    faces = []
    labels = []
    with open(os.path.join(preprocessed_dir, 'labels.txt'), 'r') as label_file:
        lines = label_file.readlines()[chunk_start:chunk_end]
    for line in lines:
        face_filename, label = line.strip().split(',')
        face_path = os.path.join(preprocessed_dir, face_filename)
        face = cv2.imread(face_path, cv2.IMREAD_GRAYSCALE)
        # Skip unreadable or missing files instead of crashing in cv2.resize
        if face is None:
            print(f"Warning: could not read '{face_path}', skipping.")
            continue
        # Resize image to a smaller size
        face = cv2.resize(face, resize_dim)
        faces.append(face)
        labels.append(int(label))
    # faces = get_subset(faces, subset_size)
    # labels = get_subset(labels, subset_size)
    faces = np.array(faces)
    labels = np.array(labels, dtype=np.int32)  # OpenCV expects 32-bit integer labels
    print(labels)  # Debug: show the labels loaded for this chunk
    return faces, labels
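# For reference, load_chunk expects labels.txt to hold one "filename,label" pair
# per line, e.g. (filenames here are illustrative, produced by the preprocessing
# step, not guaranteed by this script):
#   face_0001.png,0
#   face_0002.png,1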
# Get total number of images
with open(os.path.join(preprocessed_dir, 'labels.txt')) as label_file:
    total_images = sum(1 for _ in label_file)
# if subset_size != 0:
# total_images = subset_size
# Load and train in chunks
for i in range(0, total_images, chunk_size):
    print(f"Processing chunk {i // chunk_size + 1}...")
    faces, labels = load_chunk(i, min(i + chunk_size, total_images))
    # Use train() on the first chunk, then update() for subsequent ones
    if i == 0:
        face_recognizer.train(faces, labels)
    else:
        face_recognizer.update(faces, labels)
    print(f"Chunk {i // chunk_size + 1} processed.")
# Save the trained model
face_recognizer.save(model_save_path)
print(f"Model trained and saved at {model_save_path}")