import tensorflow as tf
from tensorflow.keras import layers, models
import numpy as np
class ImageClassifier:
    """Small CNN image classifier built on a Keras Sequential network."""

    def __init__(self, num_classes, input_shape=(224, 224, 3)):
        self.num_classes = num_classes
        self.input_shape = input_shape
        self.model = self.build_model()

    def build_model(self):
        """Construct and compile the three-conv-block network."""
        net = models.Sequential()
        net.add(layers.Conv2D(32, (3, 3), activation='relu',
                              input_shape=self.input_shape))
        net.add(layers.MaxPooling2D((2, 2)))
        # Two more conv blocks with doubling filter counts.
        for n_filters in (64, 128):
            net.add(layers.Conv2D(n_filters, (3, 3), activation='relu'))
            net.add(layers.MaxPooling2D((2, 2)))
        net.add(layers.Flatten())
        net.add(layers.Dense(512, activation='relu'))
        net.add(layers.Dropout(0.5))
        net.add(layers.Dense(self.num_classes, activation='softmax'))
        net.compile(
            optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'],
        )
        return net

    def preprocess_image(self, image_path):
        """Load one image, resize to the model's input size, scale to [0, 1],
        and prepend a batch axis."""
        pil_img = tf.keras.preprocessing.image.load_img(
            image_path, target_size=self.input_shape[:2]
        )
        arr = tf.keras.preprocessing.image.img_to_array(pil_img)
        batched = arr[np.newaxis, ...]  # same as np.expand_dims(arr, axis=0)
        return batched / 255.0

    def predict(self, image_path):
        """Return the softmax class-probability vector for a single image file."""
        batch = self.preprocess_image(image_path)
        return self.model.predict(batch)[0]
from transformers import AutoTokenizer, AutoModel
import torch
import torch.nn as nn
class BERTClassifier(nn.Module):
    """BERT encoder with a dropout + linear head for sequence classification.

    Args:
        model_name: Hugging Face model identifier for the encoder.
        num_classes: Number of output classes for the linear head.
    """

    def __init__(self, model_name='bert-base-uncased', num_classes=2):
        super().__init__()
        self.bert = AutoModel.from_pretrained(model_name)
        self.dropout = nn.Dropout(0.3)
        # Fix: derive the head's input width from the encoder config instead of
        # hard-coding 768, so non-base encoders (e.g. bert-large, 1024-dim)
        # passed via model_name work too. Identical for bert-base-uncased.
        self.classifier = nn.Linear(self.bert.config.hidden_size, num_classes)

    def forward(self, input_ids, attention_mask):
        """Return raw (unnormalized) class logits of shape (batch, num_classes)."""
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        pooled_output = outputs.pooler_output  # [CLS]-derived pooled vector
        return self.classifier(self.dropout(pooled_output))
class TextClassifier:
    """Inference wrapper: tokenizes text and runs a saved BERTClassifier."""

    def __init__(self, model_path):
        self.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        self.model = BERTClassifier()
        # Fix: map_location lets checkpoints saved on GPU load on CPU-only
        # machines instead of raising a CUDA deserialization error.
        self.model.load_state_dict(
            torch.load(model_path, map_location=torch.device('cpu'))
        )
        self.model.eval()

    def predict(self, text):
        """Return the softmax class probabilities (1-D numpy array) for `text`.

        Input is truncated/padded to 512 tokens before the forward pass.
        """
        encoding = self.tokenizer(
            text,
            truncation=True,
            padding='max_length',
            max_length=512,
            return_tensors='pt'
        )
        with torch.no_grad():
            logits = self.model(
                encoding['input_ids'],
                encoding['attention_mask']
            )
        probabilities = torch.nn.functional.softmax(logits, dim=-1)
        return probabilities.numpy()[0]
import cv2
import numpy as np
class FaceDetector:
    """Haar-cascade face and eye detector with a sharpness-based confidence."""

    def __init__(self):
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
        )
        self.eye_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_eye.xml'
        )

    def detect_faces(self, image_path):
        """Detect faces (and eyes within each face) in the image at `image_path`.

        Returns:
            (annotated_image, results): the BGR image with rectangles drawn,
            and a list of dicts with 'face_coords' (x, y, w, h), 'eyes_count',
            and 'confidence'.

        Raises:
            FileNotFoundError: if the image cannot be read from disk.
        """
        img = cv2.imread(image_path)
        # Fix: cv2.imread silently returns None on a missing/unreadable file;
        # fail loudly here instead of with a cryptic cvtColor assertion below.
        if img is None:
            raise FileNotFoundError(f'Could not read image: {image_path}')
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(
            gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
        )
        results = []
        for (x, y, w, h) in faces:
            face_roi_gray = gray[y:y+h, x:x+w]
            face_roi_color = img[y:y+h, x:x+w]
            # Eye search restricted to the face region cuts false positives.
            eyes = self.eye_cascade.detectMultiScale(face_roi_gray)
            results.append({
                'face_coords': (x, y, w, h),
                'eyes_count': len(eyes),
                'confidence': self.calculate_confidence(face_roi_gray)
            })
            # Annotate: blue box around the face, green boxes around the eyes
            # (eye boxes drawn on the ROI view mutate `img` in place).
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(face_roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        return img, results

    def calculate_confidence(self, face_region):
        """Variance of the Laplacian — a simple sharpness/contrast proxy."""
        return cv2.Laplacian(face_region, cv2.CV_64F).var()
import numpy as np
class NeuralNetwork:
    """Minimal fully-connected network with sigmoid activations, trained by
    full-batch gradient descent on mean-squared error.

    Args:
        layers: Layer sizes including input and output, e.g. [2, 3, 1].
    """

    def __init__(self, layers):
        self.layers = layers
        self.weights = []
        self.biases = []
        for i in range(len(layers) - 1):
            # Small random init breaks symmetry without saturating sigmoids.
            w = np.random.randn(layers[i], layers[i + 1]) * 0.01
            b = np.zeros((1, layers[i + 1]))
            self.weights.append(w)
            self.biases.append(b)

    def sigmoid(self, x):
        """Numerically stable logistic function (input clipped to ±250)."""
        return 1 / (1 + np.exp(-np.clip(x, -250, 250)))

    def sigmoid_derivative(self, x):
        """Derivative expressed in terms of the sigmoid OUTPUT x: x * (1 - x)."""
        return x * (1 - x)

    def forward(self, X):
        """Forward pass; caches per-layer activations for backward()."""
        self.activations = [X]
        activation = X
        for i in range(len(self.weights)):
            z = np.dot(activation, self.weights[i]) + self.biases[i]
            activation = self.sigmoid(z)
            self.activations.append(activation)
        return activation

    def backward(self, X, y, learning_rate):
        """One gradient-descent step on MSE; assumes forward() was just run."""
        m = X.shape[0]
        # Error at the output layer (d(MSE)/d(output), up to the 2/m factor
        # absorbed into the learning rate).
        error = self.activations[-1] - y
        for i in range(len(self.weights) - 1, -1, -1):
            delta = error * self.sigmoid_derivative(self.activations[i + 1])
            # Fix: propagate the error to the previous layer BEFORE updating
            # self.weights[i]. The original updated the weights first, so the
            # backpropagated error used post-update weights instead of the
            # weights that produced the cached forward activations.
            if i > 0:
                error = np.dot(delta, self.weights[i].T)
            self.weights[i] -= learning_rate * np.dot(self.activations[i].T, delta) / m
            self.biases[i] -= learning_rate * np.sum(delta, axis=0, keepdims=True) / m

    def train(self, X, y, epochs, learning_rate):
        """Full-batch training loop; logs MSE every 100 epochs."""
        for epoch in range(epochs):
            self.forward(X)
            self.backward(X, y, learning_rate)
            if epoch % 100 == 0:
                loss = np.mean((self.activations[-1] - y) ** 2)
                print(f'Epoch {epoch}, Loss: {loss:.4f}')