Update app.py
app.py CHANGED
@@ -28,6 +28,14 @@ from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple, Union
 
 import gradio as gr
+try:
+    import spaces
+except Exception:
+    class _Dummy:
+        def GPU(self, *a, **k):
+            def deco(fn): return fn
+            return deco
+    spaces = _Dummy()
 from PIL import Image, ImageDraw
 
 # ---- HF Spaces GPU decorator (호환용 더미 포함)
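
Compatibility note on the hunk above: on a ZeroGPU Space the real `spaces` package supplies the `GPU` decorator, while anywhere the import fails the `_Dummy` fallback turns `@spaces.GPU(...)` into a no-op that hands the function back unchanged. A minimal sketch of that behaviour; the `demo_step` function is made up for illustration:

# Fallback path only: with the dummy in place, decorating a function leaves it untouched.
class _Dummy:
    def GPU(self, *a, **k):
        def deco(fn): return fn
        return deco

spaces = _Dummy()

@spaces.GPU(duration=600)   # no-op outside HF Spaces
def demo_step(x):
    return x * 2

assert demo_step(3) == 6    # same result as the undecorated function
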
@@ -462,6 +470,99 @@ def step1_preprocess(img: Image.Image,
     base = _redraw(base, redraw_strength, redraw_steps, redraw_guidance)
     p = _save_png(base, OUT/"step1"/"input_preprocessed.png")
     return [base], p, "\n".join(logs)
+def step1_cpu(img, keep_rembg, do_weaponless, weapon_terms):
+    """CPU 단계: rembg + (있으면) DINO/SAM/LaMa로 무기 제거. CUDA 사용 금지"""
+    logs = []
+    if img is None:
+        raise gr.Error("이미지를 업로드하세요.")
+    base = img.convert("RGBA")
+
+    # rembg
+    try:
+        base = _remove_bg(base) if keep_rembg else base
+        if keep_rembg:
+            logs.append("rembg 배경 제거")
+    except Exception as e:
+        logs.append(f"rembg 실패: {e}")
+
+    # DINO/SAM/LaMa 무기 제거
+    try:
+        if do_weaponless:
+            base = _weaponless_pipeline(base, weapon_terms, logs)
+    except Exception as e:
+        logs.append(f"무기 제거 실패: {e}")
+
+    out_path = _save_png(base, OUT / "step1" / "input_preprocessed.png")
+    return [(out_path, "preprocessed")], out_path, "\n".join(logs)
+
+
+@spaces.GPU(duration=600)  # ← ZeroGPU 환경: 여기서만 CUDA/모델 로딩 허용
+def step1_gpu_refine(
+    s1_path,
+    enforce_tpose, tpose_strength, tpose_steps, tpose_guidance,
+    do_redraw_flag, redraw_strength, redraw_steps, redraw_guidance
+):
+    """GPU 단계: ControlNet(OpenPose)로 T-포즈 강제 + img2img 리드로우"""
+    logs = []
+    if not s1_path or not Path(s1_path).exists():
+        raise gr.Error("STEP1 이미지가 없습니다. 먼저 STEP1(CPU)을 실행하세요.")
+    img = Image.open(s1_path).convert("RGBA")
+
+    # ---- T-포즈 (ControlNet/OpenPose)
+    if enforce_tpose:
+        try:
+            from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
+            import torch
+            dev = "cuda" if torch.cuda.is_available() else "cpu"
+            cn = ControlNetModel.from_pretrained(
+                "lllyasviel/control_v11p_sd15_openpose",
+                torch_dtype=(torch.float16 if dev == "cuda" else torch.float32)
+            )
+            pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
+                "runwayml/stable-diffusion-v1-5",
+                controlnet=cn,
+                torch_dtype=(torch.float16 if dev == "cuda" else torch.float32)
+            )
+            if dev == "cuda":
+                pipe.to("cuda")
+            pose_canvas = _draw_tpose_openpose_canvas(size=max(img.size))
+            img = pipe(
+                prompt="T-pose, full body, clean anime lines",
+                image=img.convert("RGB"),
+                control_image=pose_canvas,
+                strength=float(tpose_strength),
+                guidance_scale=float(tpose_guidance),
+                num_inference_steps=int(tpose_steps)
+            ).images[0].convert("RGBA")
+            logs.append("ControlNet(OpenPose) T-포즈 적용")
+        except Exception as e:
+            logs.append(f"T-포즈 ControlNet 실패: {e}")
+
+    # ---- img2img 리드로우 (옵션)
+    if do_redraw_flag:
+        try:
+            from diffusers import StableDiffusionImg2ImgPipeline
+            import torch
+            dev = "cuda" if torch.cuda.is_available() else "cpu"
+            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
+                "runwayml/stable-diffusion-v1-5",
+                torch_dtype=(torch.float16 if dev == "cuda" else torch.float32)
+            )
+            if dev == "cuda":
+                pipe.to("cuda")
+            img = pipe(
+                prompt="clean anime illustration, sharp lines, simple solid background",
+                image=img.convert("RGB"),
+                strength=float(redraw_strength),
+                guidance_scale=float(redraw_guidance),
+                num_inference_steps=int(redraw_steps)
+            ).images[0].convert("RGBA")
+            logs.append("img2img 리드로우 적용")
+        except Exception as e:
+            logs.append(f"img2img 리드로우 실패: {e}")
+
+    out_path = _save_png(img, OUT / "step1" / "input_preprocessed.png")
+    return [(out_path, "refined")], out_path, "\n".join(logs)
 
 # ---------------------------------
 # STEP2: Spaces call (model + texture)
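
The split in this hunk follows the ZeroGPU pattern: `step1_cpu` does only rembg and the optional DINO/SAM/LaMa weapon removal, while everything that loads diffusers models or touches CUDA sits inside the `@spaces.GPU`-decorated `step1_gpu_refine`. A rough smoke test one might run from a REPL inside the app module; the file name and every numeric setting below are placeholders, not values from app.py:

# Hypothetical local run of the two STEP1 stages without the Gradio UI.
from PIL import Image

gallery, s1_path, log = step1_cpu(
    Image.open("character.png"), keep_rembg=True,
    do_weaponless=True, weapon_terms="sword, gun"
)
print(log)

# The GPU stage reads the path written by the CPU stage.
gallery, s1_path, log = step1_gpu_refine(
    s1_path,
    enforce_tpose=True, tpose_strength=0.6, tpose_steps=20, tpose_guidance=7.0,
    do_redraw_flag=True, redraw_strength=0.4, redraw_steps=20, redraw_guidance=7.0,
)
print(log)
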
@@ -674,11 +775,18 @@ with gr.Blocks() as demo:
         s1_path = gr.Textbox(label="STEP1 결과 경로", interactive=False)
         s1_log = gr.Textbox(label="STEP1 로그", interactive=False)
 
-
-
-
-
-
+        # STEP1 실행: CPU → (then) GPU
+        s1_btn.click(
+            step1_cpu,
+            inputs=[s1_img, keep_rembg, do_weaponless, weapon_terms],
+            outputs=[s1_gallery, s1_path, s1_log]
+        ).then(
+            step1_gpu_refine,
+            inputs=[s1_path, enforce_tpose, tpose_strength, tpose_steps, tpose_guidance,
+                    do_redraw_flag, redraw_strength, redraw_steps, redraw_guidance],
+            outputs=[s1_gallery, s1_path, s1_log]
+        )
+
 
     with gr.Tab("STEP 2 — 3D 생성 (모델/텍스처)"):
         do_texture = gr.Checkbox(value=True, label="텍스처 단계 실행(지원 시)")
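
Wiring note: `.then()` makes the GPU refinement start only after the CPU stage has finished, and because `s1_path` is an output of `step1_cpu` and an input of `step1_gpu_refine`, the refine pass always reads the freshly saved file. A stripped-down sketch of the same chaining pattern; the component and handler names here are generic placeholders, not from app.py:

# Minimal illustration of Gradio event chaining (assumes gradio is installed).
import gradio as gr

def fast_stage(text):
    return text.upper()            # quick CPU-only work

def slow_stage(text):
    return f"refined: {text}"      # stands in for the GPU-decorated step

with gr.Blocks() as demo:
    inp = gr.Textbox(label="input")
    mid = gr.Textbox(label="intermediate")
    out = gr.Textbox(label="final")
    btn = gr.Button("run")
    # the second handler runs only after the first has written `mid`
    btn.click(fast_stage, inputs=inp, outputs=mid).then(
        slow_stage, inputs=mid, outputs=out
    )

# demo.launch()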