Commit a33d902
1 Parent(s): 28a7299
test

Files changed:
- .env +5 -0
- Dockerfile +16 -0
- main.py +51 -0
- requirements.txt +21 -0
- services/cloudinary.py +15 -0
- services/db.py +30 -0
- services/feature_extraction.py +30 -0
.env
ADDED
@@ -0,0 +1,5 @@
+CLOUDINARY_CLOUD_NAME="doon4qkgm"
+CLOUDINARY_API_KEY=735892612685739
+CLOUDINARY_API_SECRET=49jRAR4VWshrRAC25UaBTjmws9U
+MONGODB_URI=mongodb+srv://zuhasohail2003:[email protected]/?retryWrites=true&w=majority&appName=ImageSearchFYP
+PORT=5000
Dockerfile
ADDED
@@ -0,0 +1,16 @@
+# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+# you will also find guides on how best to write your Dockerfile
+
+FROM python:3.9
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+COPY --chown=user . /app
+CMD ["gunicorn", "-b", "0.0.0.0:7860", "main:app"]
main.py
ADDED
@@ -0,0 +1,51 @@
+import os
+import tempfile
+from flask import Flask, request, jsonify
+from flask_cors import CORS
+from services.db import save_image_data, find_similar_images
+from services.cloudinary import upload_to_cloudinary
+from services.feature_extraction import extract_features
+
+app = Flask(__name__)
+CORS(app, resources={r"/api/*": {"origins": "*"}}, supports_credentials=True)  # Allow all origins
+
+
+
+
+@app.route("/api/search", methods=["POST"])
+def search_image():
+    file = request.files["file"]
+
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp:
+        file.save(temp.name)
+        temp_path = temp.name
+
+    query_features = extract_features(temp_path)
+    similar_images = find_similar_images(query_features)
+    os.remove(temp_path)
+
+    return jsonify({
+        "similar_images": [
+            {"url": img[0], "score": round(img[1], 4)} for img in similar_images
+        ]
+    })
+
+@app.route("/api/upload", methods=["POST"])
+def upload_image():
+    file = request.files["file"]
+
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp:
+        file.save(temp.name)
+        temp_path = temp.name
+
+    cloud_url = upload_to_cloudinary(temp_path)
+    features = extract_features(temp_path)
+    save_image_data({"image_url": cloud_url, "features": features})
+
+    os.remove(temp_path)
+    return jsonify({"message": "Image uploaded", "url": cloud_url})
+
+
+if __name__ == "__main__":
+    port = int(os.environ.get("PORT", 5000))
+    app.run(host="0.0.0.0", port=port)
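For reference, a minimal client sketch exercising the two routes above. It assumes the requests package on the client side (not in requirements.txt) and a container running locally; BASE_URL and the image paths are placeholders, not part of this commit.

import requests

BASE_URL = "http://localhost:7860"  # placeholder; gunicorn binds 0.0.0.0:7860 in the Dockerfile

# Index an image: upload to Cloudinary and store its feature vector in MongoDB.
with open("example.jpg", "rb") as f:
    print(requests.post(f"{BASE_URL}/api/upload", files={"file": f}).json())

# Search with a query image: returns the top-5 most similar stored images by cosine similarity.
with open("query.jpg", "rb") as f:
    print(requests.post(f"{BASE_URL}/api/search", files={"file": f}).json())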
requirements.txt
ADDED
@@ -0,0 +1,21 @@
+blinker==1.9.0
+certifi==2025.1.31
+click==8.1.8
+cloudinary==1.43.0
+colorama==0.4.6
+dnspython==2.7.0
+faiss-cpu==1.10.0
+Flask==3.1.0
+flask-cors==5.0.1
+gunicorn==23.0.0
+Jinja2==3.1.6
+MarkupSafe==2.1.5
+numpy==2.0.2
+pillow==11.0.0
+pymongo==4.11.3
+python-dotenv==1.0.1
+torch==2.6.0
+torchvision==0.21.0
+tqdm==4.67.1
+urllib3==2.3.0
+Werkzeug==3.1.3
services/cloudinary.py
ADDED
@@ -0,0 +1,15 @@
+import cloudinary
+import cloudinary.uploader
+from config import config
+from config.config import CLOUDINARY_CLOUD_NAME, CLOUDINARY_API_KEY, CLOUDINARY_API_SECRET
+
+print("🚀 Cloudinary setup successful", CLOUDINARY_CLOUD_NAME)
+cloudinary.config(
+    cloud_name=CLOUDINARY_CLOUD_NAME,
+    api_key=CLOUDINARY_API_KEY,
+    api_secret=CLOUDINARY_API_SECRET
+)
+
+def upload_to_cloudinary(image_path):
+    response = cloudinary.uploader.upload(image_path)
+    return response["secure_url"]
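Both services/cloudinary.py and services/db.py (below) import from config.config, which is not included in this commit. A hypothetical sketch of what that module presumably contains, assuming it loads the committed .env via python-dotenv (pinned in requirements.txt):

# config/config.py -- hypothetical sketch; the real module is referenced but not part of this commit.
import os
from dotenv import load_dotenv

load_dotenv()  # read the committed .env file into the environment

CLOUDINARY_CLOUD_NAME = os.getenv("CLOUDINARY_CLOUD_NAME")
CLOUDINARY_API_KEY = os.getenv("CLOUDINARY_API_KEY")
CLOUDINARY_API_SECRET = os.getenv("CLOUDINARY_API_SECRET")
MONGODB_URI = os.getenv("MONGODB_URI")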
services/db.py
ADDED
@@ -0,0 +1,30 @@
+
+from pymongo import MongoClient
+from config.config import MONGODB_URI
+import numpy as np
+
+
+client = MongoClient(MONGODB_URI)
+db = client["image_search"]
+collection = db["images"]
+
+def save_image_data(image_data):
+    collection.insert_one(image_data)
+
+def get_all_images():
+    return list(collection.find({}, {"_id": 0}))
+
+def cosine_similarity(vec1, vec2):
+    """Computes cosine similarity between two vectors."""
+    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
+
+def find_similar_images(query_vector, top_n=5):
+    """Finds top N similar images based on cosine similarity."""
+    images = get_all_images()
+
+    similarities = [
+        (image["image_url"], cosine_similarity(query_vector, image["features"]))
+        for image in images
+    ]
+
+    return sorted(similarities, key=lambda x: x[1], reverse=True)[:top_n]
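Note that find_similar_images is a brute-force scan: it loads every stored document and ranks it by cosine similarity in Python (faiss-cpu is pinned in requirements.txt but not used in this commit). A small self-contained check of the similarity math, with toy vectors:

# Standalone sketch of the scoring used in services/db.py; no MongoDB connection needed.
import numpy as np

def cosine_similarity(vec1, vec2):
    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))

print(round(cosine_similarity([1.0, 0.0, 1.0], [1.0, 0.0, 0.5]), 4))  # 0.9487 -- near-parallel vectors score close to 1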
services/feature_extraction.py
ADDED
@@ -0,0 +1,30 @@
+
+import torch
+import torchvision.models as models
+import torchvision.transforms as transforms
+from PIL import Image
+
+# Load Pretrained ResNet-50 Model
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model = models.resnet50(pretrained=True).to(device)
+model.eval()
+
+# Remove the last fully connected layer
+model = torch.nn.Sequential(*(list(model.children())[:-1]))
+
+# Define Preprocessing Transform
+transform = transforms.Compose([
+    transforms.Resize((224, 224)),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+])
+
+def extract_features(image_path):
+    """Extracts ResNet feature vector from an image."""
+    image = Image.open(image_path).convert("RGB")
+    image = transform(image).unsqueeze(0).to(device)
+
+    with torch.no_grad():
+        features = model(image).squeeze()
+
+    return features.cpu().numpy().flatten().tolist()  # Convert tensor to list
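The truncated network ends at ResNet-50's global average pooling layer, so each image maps to a 2048-dimensional vector, returned as a plain Python list so it can be stored directly in MongoDB. (Note: recent torchvision releases deprecate pretrained=True in favor of weights=models.ResNet50_Weights.IMAGENET1K_V1.) A usage sketch; the image path is a placeholder:

# Assumes the project root is on sys.path; "example.jpg" is a placeholder image.
from services.feature_extraction import extract_features

vec = extract_features("example.jpg")
print(len(vec))  # 2048 -- size of ResNet-50's pooled feature vector
print(vec[:3])   # plain Python floats, ready to store alongside the Cloudinary URL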