Upload 4 files
- app (2).py +52 -0
- is_human_classifier.pkl +3 -0
- requirements (3).txt +7 -0
- train_svm.py +45 -0
app (2).py
ADDED
@@ -0,0 +1,52 @@
+import gradio as gr
+import cv2
+import numpy as np
+from PIL import Image
+import insightface
+import joblib
+
+# Load the ArcFace face analyzer
+face_model = insightface.app.FaceAnalysis(name="buffalo_l", providers=['CPUExecutionProvider'])
+face_model.prepare(ctx_id=0)
+
+# Load the SVM classifier
+clf = joblib.load("is_human_classifier.pkl")
+
+# Face classification function
+def predict(image):
+    try:
+        img = np.array(image.convert("RGB"))
+        img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
+
+        faces = face_model.get(img_bgr)
+        if not faces:
+            return "❌ No face detected."
+
+        emb = faces[0].embedding.reshape(1, -1)
+        pred = clf.predict(emb)[0]
+        proba = clf.predict_proba(emb)[0][pred]
+        result = "✅ Human" if pred == 1 else "❌ Not human"
+        return f"{result} (confidence: {proba:.2f})"
+    except Exception as e:
+        return f"❌ Error: {str(e)}"
+
+# Gradio interface (webcam + upload tabs)
+with gr.Blocks() as demo:
+    gr.Markdown("## 🤖 Real-time Human Face Checker\nDetects whether the subject is a human via webcam or image upload.")
+
+    with gr.Tab("📸 Webcam"):
+        webcam_input = gr.Image(sources=["webcam"], type="pil", label="Webcam")  # gr.Camera is not a Gradio component; use gr.Image with a webcam source
+        webcam_output = gr.Textbox(label="Result")
+        webcam_button = gr.Button("Predict")
+        webcam_button.click(predict, inputs=webcam_input, outputs=webcam_output)
+
+
+    with gr.Tab("🖼️ Image Upload"):
+        upload_input = gr.Image(type="pil", label="Upload image")  # type="pil" so predict() receives a PIL image
+        upload_output = gr.Textbox(label="Result")
+        upload_button = gr.Button("Predict")
+        upload_button.click(predict, inputs=upload_input, outputs=upload_output)
+
+if __name__ == "__main__":
+    demo.launch()
+
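For reference, the same pipeline can be exercised without Gradio; a minimal sketch assuming the files above are present locally (the image path test.jpg is hypothetical, not part of this upload):

import cv2
import joblib
import insightface

face_model = insightface.app.FaceAnalysis(name="buffalo_l", providers=['CPUExecutionProvider'])
face_model.prepare(ctx_id=0)
clf = joblib.load("is_human_classifier.pkl")

img = cv2.imread("test.jpg")                 # BGR image, as in train_svm.py
faces = face_model.get(img)
if not faces:
    print("No face detected")
else:
    emb = faces[0].embedding.reshape(1, -1)  # 512-d ArcFace embedding of the first face
    pred = clf.predict(emb)[0]
    proba = clf.predict_proba(emb)[0][pred]
    print("human" if pred == 1 else "not human", f"(confidence: {proba:.2f})")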
is_human_classifier.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab1a94275b847a7dbbc270b3746f88ae5860dd7e7b9afa340cb1c3e61c30c884
+size 610859
requirements (3).txt
ADDED
@@ -0,0 +1,7 @@
+gradio>=4.0.0
+opencv-python-headless
+insightface
+onnxruntime
+scikit-learn
+joblib
+Pillow
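Note that train_svm.py below also imports tqdm, which is not listed here; this requirements file only needs to cover the deployed app, but the training script requires tqdm to be installed separately.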
train_svm.py
ADDED
@@ -0,0 +1,45 @@
+import os
+import cv2
+import numpy as np
+import insightface
+from sklearn.svm import SVC
+import joblib
+from tqdm import tqdm
+
+# 1. Load the ArcFace model
+model = insightface.app.FaceAnalysis(name="buffalo_l", providers=['CPUExecutionProvider'])
+model.prepare(ctx_id=0)
+
+X = []
+y = []
+
+# 2. Face embedding extraction
+def extract_embeddings(folder_path, label):
+    for fname in tqdm(os.listdir(folder_path), desc=f"{label} - {os.path.basename(folder_path)}"):
+        if not fname.lower().endswith((".jpg", ".jpeg", ".png")):
+            continue
+        path = os.path.join(folder_path, fname)
+        img = cv2.imread(path)
+        if img is None:
+            continue
+        faces = model.get(img)
+        if faces:
+            emb = faces[0].embedding
+            X.append(emb)
+            y.append(label)
+
+# 3. Extract embeddings from the human (1) and non-human (0) folders
+extract_embeddings("dataset/human", 1)
+extract_embeddings("dataset/nonhuman", 0)
+
+print(f"\n✅ Total samples: {len(X)}")
+print(f" - Human faces: {y.count(1)}")
+print(f" - Non-human: {y.count(0)}")
+
+# 4. Train the SVM classifier
+clf = SVC(kernel='linear', probability=True)
+clf.fit(X, y)
+
+# 5. Save the model
+joblib.dump(clf, "is_human_classifier.pkl")
+print("\n✅ Classifier saved: is_human_classifier.pkl")
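The script fits the SVM on every extracted embedding and saves it without any held-out evaluation. A quick sanity check that could be run before the joblib.dump step — a minimal sketch assuming the X and y lists built above, using scikit-learn's cross_val_score:

import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

# 5-fold cross-validated accuracy on the same embeddings/labels
scores = cross_val_score(SVC(kernel='linear', probability=True), np.array(X), np.array(y), cv=5)
print(f"5-fold accuracy: {scores.mean():.3f} +/- {scores.std():.3f}")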