Hi-Media

๋ฏธ๋‹ˆ ํ”„๋กœ์ ํŠธ 3์ผ์ฐจ..

์ˆ˜ํ˜€์ด0812 2024. 11. 4. 00:59

 

# Using a text summarization model

 

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import nltk
from fastapi import FastAPI, Form
import torch

# Download NLTK tokenizer data
# (note: newer NLTK releases may also require nltk.download('punkt_tab'))
nltk.download('punkt')

# ๋ชจ๋ธ๊ณผ ํ† ํฌ๋‚˜์ด์ € ์„ค์ •
model_dir = "lcw99/t5-base-korean-text-summary"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)

# Select the device: CUDA if available, otherwise CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)  # move the model to the selected device

app = FastAPI()

@app.post("/text_sum/")
async def text_sum(input_text: str = Form(...)):
    max_input_length = 512

    # ์ž…๋ ฅ ํ…์ŠคํŠธ ์„ค์ • ๋ฐ ์ „์ฒ˜๋ฆฌ
    text = input_text
    inputs = ["summarize: " + text]
    inputs = tokenizer(inputs, max_length=max_input_length, truncation=True, return_tensors="pt").to(device)  # ์ž…๋ ฅ์„ CUDA๋กœ ์ด๋™

    # ์š”์•ฝ ์ƒ์„ฑ
    with torch.no_grad():
        output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=10, max_length=100)

    # ์š”์•ฝ๋œ ํ…์ŠคํŠธ ๋””์ฝ”๋”ฉ ๋ฐ ๋ฌธ์žฅํ™”
    decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    predicted_summary = nltk.sent_tokenize(decoded_output.strip())[0]

    return {"result": predicted_summary}
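A quick way to sanity-check the endpoint from another shell, assuming the app is served locally with uvicorn on port 8000 (the URL and sample text are just for illustration):

import requests

# Hypothetical client call for the /text_sum/ endpoint above.
resp = requests.post(
    "http://127.0.0.1:8000/text_sum/",
    data={"input_text": "Paste a long article here..."},
)
print(resp.json()["result"])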

 

 

# Couldn't get the quote recommendation model working, so picking at random ;;;

 

from transformers import OPTForCausalLM, GPT2Tokenizer
from fastapi import FastAPI, Form
import torch
import random

# ๋ชจ๋ธ๊ณผ ํ† ํฌ๋‚˜์ด์ € ์„ค์ •
model = OPTForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.float16)
tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")

# ๋ชจ๋ธ์ด ์‚ฌ์šฉํ•  ์žฅ์น˜ ์„ค์ •
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Initialize the FastAPI app
app = FastAPI()

# ๋ช…์–ธ ํŒŒ์ผ์—์„œ ๋ช…์–ธ์„ ์ฝ์–ด ๋ฆฌ์ŠคํŠธ์— ์ €์žฅ
def load_quotes(file_path="wise_saying.txt"):
    with open(file_path, "r", encoding="utf-8") as file:
        return [line.strip() for line in file.readlines() if line.strip()]

quotes = load_quotes()  # load the quotes from wise_saying.txt

# FastAPI ์—”๋“œํฌ์ธํŠธ: ์ž…๋ ฅํ•œ ํ…์ŠคํŠธ๋ฅผ ๊ธฐ๋ฐ˜์œผ๋กœ ๋ช…์–ธ ์ƒ์„ฑ
@app.post("/create_text/")
async def create_text(input_text: str = Form(...)):
    # Pick one quote at random from the list (input_text is currently unused)
    selected_quote = random.choice(quotes)
    # Strip a leading "N." numbering prefix, if present
    selected_quote = selected_quote.split('.', 1)[-1].strip()
    return {"result": selected_quote}
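The split('.', 1) above assumes each line in wise_saying.txt carries a leading number; a made-up example of the expected format and what the stripping does:

# Hypothetical wise_saying.txt contents (one "N. quote" entry per line):
#   1. Fall seven times, stand up eight.
#   2. The journey of a thousand miles begins with a single step.
line = "1. Fall seven times, stand up eight."
print(line.split('.', 1)[-1].strip())  # -> Fall seven times, stand up eight.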


# # Function that finds the quote most relevant to the input text
# def get_best_quote(input_text, quotes):
#     best_quote = ""
#     highest_score = -float("inf")  # start the best score at negative infinity

#     for quote in quotes:
#         prompt = f"Pick the quote that best fits the following input: '{input_text}'\nQuote: '{quote}'"
        
#         # Tokenize the input text together with the quote as model input
#         inputs = tokenizer(prompt, return_tensors="pt").to(device)
#         with torch.no_grad():
#             # Use the model to score how related the text and the quote are
#             output = model(**inputs).logits.mean().item()
        
#         # ๋ชจ๋ธ ์ถœ๋ ฅ์ด ๋†’์„์ˆ˜๋ก ๋†’์€ ์—ฐ๊ด€์„ฑ์œผ๋กœ ๊ฐ„์ฃผ
#         if output > highest_score:
#             highest_score = output
#             best_quote = quote

#     return best_quote

# # FastAPI endpoint: pick the most fitting quote for the submitted text
# @app.post("/create_text/")
# async def create_text(input_text: str = Form(...)):
#     # Find the quote most relevant to the input text
#     selected_quote = get_best_quote(input_text, quotes)
#     selected_quote = selected_quote.split('.', 1)[-1].strip()
#     return {"result": selected_quote}
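For what it's worth, scoring relevance with the mean of the LM logits (as in the commented-out code above) doesn't really measure similarity. A more principled route would be embedding-based matching; here is a minimal sketch using the sentence-transformers library, which is a different technique than the one tried above, and the multilingual model name is just one common choice:

from sentence_transformers import SentenceTransformer, util

# Multilingual embedding model (handles Korean); assumed available on the Hub.
embedder = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")

def get_best_quote_by_embedding(input_text, quotes):
    # Embed the input and every quote, then rank by cosine similarity
    query_emb = embedder.encode(input_text, convert_to_tensor=True)
    quote_embs = embedder.encode(quotes, convert_to_tensor=True)
    scores = util.cos_sim(query_emb, quote_embs)[0]
    return quotes[int(scores.argmax())]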

 

 

# ํ…์ŠคํŠธ ์ด๋ฏธ์ง€ ์ƒ์„ฑ ๋‹ค์‹œ ํ•ด๋ณด๊ธฐ..

 

from diffusers import StableDiffusionPipeline
from fastapi import FastAPI, Form
from fastapi.responses import FileResponse
import torch

model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda"

pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to(device)


# FastAPI ์ธ์Šคํ„ด์Šค ์ƒ์„ฑ
app = FastAPI()

# ์ด๋ฏธ์ง€ ์ƒ์„ฑ ์—”๋“œํฌ์ธํŠธ
@app.post("/createimage/")
async def create_image(input_text: str = Form(...)):       

    # Generate an image from the text prompt
    image = pipe(
        prompt=input_text,
        num_inference_steps=28,
        guidance_scale=3.5
    ).images[0]

    # Save the image
    image_path = "generated_image.png"
    image.save(image_path)

    # return {"image_path": image_path}
    return FileResponse(image_path, media_type="image/png")
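To check the result from the client side, something like this fetches and saves the returned PNG, again assuming a local uvicorn server (the prompt and filename are arbitrary):

import requests

# Hypothetical client call: POST a prompt, write the streamed PNG to disk.
resp = requests.post(
    "http://127.0.0.1:8000/createimage/",
    data={"input_text": "a watercolor lighthouse at dawn"},
)
with open("result.png", "wb") as f:
    f.write(resp.content)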




# Generation took way too long, so trimming it down...

from diffusers import StableDiffusionPipeline
from fastapi import FastAPI, Form
from fastapi.responses import FileResponse
import torch

model_id = "runwayml/stable-diffusion-v1-5"
device = "cuda"

# Set up the Stable Diffusion pipeline (FP16)
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(device)

app = FastAPI()

@app.post("/createimage/")
async def create_image(input_text: str = Form(...)):       
    # Generate an image from the text (parameters tuned for speed)
    image = pipe(
        prompt=input_text,
        num_inference_steps=20,  # fewer denoising steps for faster generation
        guidance_scale=3.0       # how strongly the image follows the prompt
    ).images[0]

    # Save and return the image
    image_path = "generated_image.png"
    image.save(image_path)
    return FileResponse(image_path, media_type="image/png")
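If it's still too slow, one more knob (not something tried in this post) is swapping in a faster scheduler; diffusers' DPMSolverMultistepScheduler usually gives usable images in around 20 steps:

from diffusers import DPMSolverMultistepScheduler

# Replace the pipeline's default scheduler with a faster multistep solver.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)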

 

 

Seongbeom showed me how to log in for the Stable Diffusion model I was using yesterday...

 

1. Generate a new token,

2. type huggingface-cli login in the terminal,

3. and it asks you to paste the token... the input isn't echoed, so just paste it once.. (a one-liner alternative is sketched below)
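If pasting blind is annoying, logging in from Python also works via huggingface_hub (the token string below is a placeholder):

from huggingface_hub import login

# Log in once with your Hugging Face access token (placeholder value).
login(token="hf_...")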

# '''
# 5. Music / quote / image recommendation & generation (model TBD):
#      (Candidate 1) facebook/musicgen-small
#      A model that generates music from a written prompt.
#      For this one, though, we would probably have to prepare a few prompts on the backend side.
# '''


from diffusers import StableDiffusionPipeline
from fastapi import FastAPI, Form
import torch

# Hugging Face API token (placeholder value - the real token is redacted here;
# never publish one)
API_TOKEN = "hf_..."

# Stable Diffusion ๋ชจ๋ธ ๋กœ๋“œ
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",
    torch_dtype=torch.float16,
    use_auth_token=API_TOKEN  # API ํ† ํฐ์„ ์‚ฌ์šฉํ•ด ์ธ์ฆ
)
pipe = pipe.to("cuda")

# FastAPI ์ธ์Šคํ„ด์Šค ์ƒ์„ฑ
app = FastAPI()

# ์ด๋ฏธ์ง€ ์ƒ์„ฑ ์—”๋“œํฌ์ธํŠธ
@app.post("/createimage/")
async def create_image(text: str = Form(...)):
    """
    ์ฃผ์–ด์ง„ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๊ธฐ๋ฐ˜์œผ๋กœ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜์—ฌ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค.
    """
    # ํ…์ŠคํŠธ ๊ธฐ๋ฐ˜์œผ๋กœ ์ด๋ฏธ์ง€ ์ƒ์„ฑ
    image = pipe(
        prompt=text,
        num_inference_steps=28,
        guidance_scale=3.5
    ).images[0]

    # Save the image
    image_path = "generated_image.png"
    image.save(image_path)

    return {"image_path": image_path}
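For the record, loading SD 3.5 itself needs the SD3 pipeline class rather than StableDiffusionPipeline; roughly like this, assuming a recent diffusers release that ships StableDiffusion3Pipeline and an account with access to the gated repo:

from diffusers import StableDiffusion3Pipeline
import torch

# SD3-family checkpoints use their own pipeline class.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",
    torch_dtype=torch.bfloat16,
).to("cuda")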
    
 
 
# Image generation after a first round of fixes
from diffusers import StableDiffusionPipeline
from fastapi import FastAPI, Form
from fastapi.responses import FileResponse
import torch

# Load the model with a compatible model ID
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    safety_checker=None  # disable the safety checker to work around some errors
).to("cuda")
pipe.enable_attention_slicing()

# FastAPI ์ธ์Šคํ„ด์Šค ์ƒ์„ฑ
app = FastAPI()

# ์ด๋ฏธ์ง€ ์ƒ์„ฑ ์—”๋“œํฌ์ธํŠธ
@app.post("/createimage/")
async def create_image(text: str = Form(...)):
    """
    ์ฃผ์–ด์ง„ ํ…์ŠคํŠธ ํ”„๋กฌํ”„ํŠธ๋ฅผ ๊ธฐ๋ฐ˜์œผ๋กœ ์ด๋ฏธ์ง€๋ฅผ ์ƒ์„ฑํ•˜์—ฌ ์ €์žฅํ•ฉ๋‹ˆ๋‹ค.
    """
    # ํ…์ŠคํŠธ ๊ธฐ๋ฐ˜์œผ๋กœ ์ด๋ฏธ์ง€ ์ƒ์„ฑ
    image = pipe(
        prompt=text,
        num_inference_steps=28,
        guidance_scale=3.5
    ).images[0]

    # Save the image
    image_path = "generated_image.png"
    image.save(image_path)

    return FileResponse(image_path, media_type="image/png")
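Each of these snippets is its own standalone FastAPI app, so whichever one is active gets served the same way (assuming the file is saved as main.py):

uvicorn main:app --reload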