Mini Project, Day 3..
์ํ์ด0812
2024. 11. 4. 00:59
# Using a text summarization model
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import nltk
from fastapi import FastAPI, Form
import torch

# Download NLTK sentence-tokenizer data
nltk.download('punkt')

# Set up the model and tokenizer
model_dir = "lcw99/t5-base-korean-text-summary"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(model_dir)

# Pick the device and move the model to it
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)  # move the model to CUDA when available

app = FastAPI()

@app.post("/text_sum/")
async def text_sum(input_text: str = Form(...)):
    max_input_length = 512
    # Set up and preprocess the input text
    text = input_text
    inputs = ["summarize: " + text]
    inputs = tokenizer(inputs, max_length=max_input_length, truncation=True, return_tensors="pt").to(device)  # move the inputs to the same device
    # Generate the summary
    with torch.no_grad():
        output = model.generate(**inputs, num_beams=8, do_sample=True, min_length=10, max_length=100)
    # Decode the summary and split it into sentences
    decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    predicted_summary = nltk.sent_tokenize(decoded_output.strip())[0]
    return {"result": predicted_summary}
# Couldn't get the quote-recommendation model to work, so just picking at random;;;
from transformers import OPTForCausalLM, GPT2Tokenizer
from fastapi import FastAPI, Form
import torch
import random
# Set up the model and tokenizer (only needed by the commented-out relevance scorer below)
model = OPTForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.float16)
tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")

# Select the device for the model
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Initialize the FastAPI app
app = FastAPI()

# Read the quotes file into a list
def load_quotes(file_path="wise_saying.txt"):
    with open(file_path, "r", encoding="utf-8") as file:
        return [line.strip() for line in file.readlines() if line.strip()]

quotes = load_quotes()  # load the quotes from wise_saying.txt

# FastAPI endpoint: produce a quote for the input text
@app.post("/create_text/")
async def create_text(input_text: str = Form(...)):
    # Pick one quote from the list at random
    selected_quote = random.choice(quotes)
    selected_quote = selected_quote.split('.', 1)[-1].strip()  # drop a leading "1." style number
    return {"result": selected_quote}
# # Function that finds the quote most relevant to the input text
# def get_best_quote(input_text, quotes):
#     best_quote = ""
#     highest_score = -float("inf")  # start the best score at negative infinity
#     for quote in quotes:
#         prompt = f"Pick the quote that best matches the following input: '{input_text}'\nQuote: '{quote}'"
#         # Tokenize the input text together with the quote and use it as model input
#         inputs = tokenizer(prompt, return_tensors="pt").to(device)
#         with torch.no_grad():
#             # Use the model to judge how related the text and the quote are
#             output = model(**inputs).logits.mean().item()
#         # Treat a higher model output as higher relevance
#         if output > highest_score:
#             highest_score = output
#             best_quote = quote
#     return best_quote

# # FastAPI endpoint: pick the best-fitting quote for the input text
# @app.post("/create_text/")
# async def create_text(input_text: str = Form(...)):
#     # Find the quote most relevant to the input text
#     selected_quote = get_best_quote(input_text, quotes)
#     selected_quote = selected_quote.split('.', 1)[-1].strip()
#     return {"result": selected_quote}
# Trying text-to-image generation again..
from diffusers import StableDiffusionPipeline
from fastapi import FastAPI, Form
from fastapi.responses import FileResponse
import torch

model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to(device)

# Create the FastAPI instance
app = FastAPI()

# Image generation endpoint
@app.post("/createimage/")
async def create_image(input_text: str = Form(...)):
    # Generate an image from the text
    image = pipe(
        prompt=input_text,
        num_inference_steps=28,
        guidance_scale=3.5
    ).images[0]
    # Save the image
    image_path = "generated_image.png"
    image.save(image_path)
    # return {"image_path": image_path}
    return FileResponse(image_path, media_type="image/png")
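To check this endpoint end to end, the returned bytes can be written straight to disk; a small client sketch (the URL and prompt are placeholders):

import requests

resp = requests.post(
    "http://localhost:8000/createimage/",
    data={"input_text": "a watercolor lighthouse at dusk"},
)
# FileResponse streams the PNG itself, so the body is raw image bytes
with open("result.png", "wb") as f:
    f.write(resp.content)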
# Generation takes too long, so cutting it down...
from diffusers import StableDiffusionPipeline
from fastapi import FastAPI, Form
from fastapi.responses import FileResponse
import torch

model_id = "runwayml/stable-diffusion-v1-5"
device = "cuda"

# Set up the Stable Diffusion pipeline (using FP16)
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(device)

app = FastAPI()

@app.post("/createimage/")
async def create_image(input_text: str = Form(...)):
    # Generate an image from the text (parameters tuned for faster generation)
    image = pipe(
        prompt=input_text,
        num_inference_steps=20,  # 20 steps to speed up generation
        guidance_scale=3.0       # how strongly to follow the prompt
    ).images[0]
    # Save and return the image
    image_path = "generated_image.png"
    image.save(image_path)
    return FileResponse(image_path, media_type="image/png")
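To see whether dropping to 20 steps actually helps, a rough timing check can run in the same process as the pipeline; a sketch (the prompt is arbitrary, and numbers will vary by GPU):

import time

start = time.perf_counter()
_ = pipe(prompt="a test prompt", num_inference_steps=20, guidance_scale=3.0)
elapsed = time.perf_counter() - start
print(f"20 steps: {elapsed:.1f}s")  # compare against the same call with num_inference_steps=28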
์ด์ ์ด StableDiffusion ๋ก๊ทธ์ธ ํ๋ ๋ฒ ์ฑ๋ฒ๋์ด ์๋ ค์ฃผ์ฌ...
1. token ์๋ก ์์ฑํ๊ณ
2. ํฐ๋ฏธ๋์ huggingface-cli login ์ ๋ ฅํ๋ฉด
3. ํ ํฐ ๋ฃ์ด๋ผ๊ณ ํ๋๋ฐ... ์ ๋ณด์ด๋๊น ํ๋ฒ๋ง ๋ถ์ฌ๋ฃ๊ธฐ ํ์ผ..
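The same login can also be done from Python via huggingface_hub, which avoids the invisible-paste problem; a small sketch (the token string is a placeholder):

from huggingface_hub import login

# Paste your own access token; this stores the credential just like huggingface-cli login
login(token="hf_xxxxxxxxxxxxxxxxxxxx")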
# '''
# 5. Music / quote / picture recommendation & generation (model not decided yet):
# (Candidate 1) facebook/musicgen-small
# A model that generates music when you give it a prompt.
# Instead, it looks like we'd have to build a few prompts on the backend side and feed them in.
# '''
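For the record, the candidate facebook/musicgen-small can be driven through transformers roughly like this, following its model card (the prompt and max_new_tokens are arbitrary choices):

from transformers import AutoProcessor, MusicgenForConditionalGeneration
import scipy.io.wavfile

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

inputs = processor(text=["calm lo-fi piano with soft drums"], padding=True, return_tensors="pt")
audio_values = model.generate(**inputs, max_new_tokens=256)  # roughly five seconds of audio

# Write the generated waveform to a wav file at the model's native sampling rate
sampling_rate = model.config.audio_encoder.sampling_rate
scipy.io.wavfile.write("musicgen_out.wav", rate=sampling_rate, data=audio_values[0, 0].numpy())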
from diffusers import StableDiffusionPipeline
from fastapi import FastAPI, Form
import torch

# Set the Hugging Face API token (real value redacted; use your own)
API_TOKEN = "hf_xxxxxxxxxxxxxxxxxxxx"
# headers = {"Authorization": f"Bearer {API_TOKEN}"}

# Load the Stable Diffusion model
# (Note: stable-diffusion-3.5-large is an SD3 checkpoint, so StableDiffusionPipeline can't load it;
# this attempt failed, and the block below falls back to a compatible model ID.)
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",
    torch_dtype=torch.float16,
    use_auth_token=API_TOKEN  # authenticate with the API token
)
pipe = pipe.to("cuda")
# Create the FastAPI instance
app = FastAPI()

# Image generation endpoint
@app.post("/createimage/")
async def create_image(text: str = Form(...)):
    """
    Generate and save an image from the given text prompt.
    """
    # Generate an image from the text
    image = pipe(
        prompt=text,
        num_inference_steps=28,
        guidance_scale=3.5
    ).images[0]
    # Save the image
    image_path = "generated_image.png"
    image.save(image_path)
    return {"image_path": image_path}
# Patching things up for one more image-generation attempt..
from diffusers import StableDiffusionPipeline
from fastapi import FastAPI, Form
from fastapi.responses import FileResponse
import torch

# Load the model using a compatible model ID
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    safety_checker=None  # disable the safety checker to avoid some errors
).to("cuda")
pipe.enable_attention_slicing()  # slice attention to lower VRAM usage

# Create the FastAPI instance
app = FastAPI()

# Image generation endpoint
@app.post("/createimage/")
async def create_image(text: str = Form(...)):
    """
    Generate and save an image from the given text prompt.
    """
    # Generate an image from the text
    image = pipe(
        prompt=text,
        num_inference_steps=28,
        guidance_scale=3.5
    ).images[0]
    # Save the image
    image_path = "generated_image.png"
    image.save(image_path)
    return FileResponse(image_path, media_type="image/png")
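Each of these snippets is a standalone FastAPI app, so whichever one is saved as, say, main.py gets served with uvicorn (uvicorn main:app --reload), and every endpoint can also be tried interactively from the Swagger UI at /docs.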