-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
137 lines (116 loc) · 4.39 KB
/
app.py
File metadata and controls
137 lines (116 loc) · 4.39 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import os
import base64
import requests
import numpy as np
from io import BytesIO
import easyocr
import streamlit as st
from PIL import Image
import torch
from transformers import BlipProcessor, BlipForConditionalGeneration
from transformers import pipeline
from dotenv import load_dotenv
# --- Configuration & shared resources -------------------------------------
# Pull HF_TOKEN from a .env file / environment; it authenticates the hosted
# text-to-image API used in the second tab.
load_dotenv()
hf_token = os.getenv("HF_TOKEN")

# CPU-only (device=-1) summarization pipeline used to condense the BLIP
# caption plus any OCR-extracted text into one short summary.
summarizer = pipeline(
    "summarization",
    model="facebook/bart-large-cnn",
    device=-1,
)

# Streamlit page chrome. NOTE: set_page_config must be the first st.* call,
# so this ordering is deliberate.
st.set_page_config(page_title="Modern Edge Agent", layout="centered")
st.title("Modern Edge Agent: Image Captioning & Generation")
st.write("Upload an image to get a caption, or enter a prompt to "
         "generate an image. Runs on edge devices!")
@st.cache_resource
def load_blip():
    """Load and cache the BLIP image-captioning model and its processor.

    Returns:
        tuple: ``(processor, model)`` for
        ``Salesforce/blip-image-captioning-base``.

    Cached with ``st.cache_resource`` so the weights are downloaded and
    initialised only once per Streamlit process, not on every rerun.
    """
    model_id = "Salesforce/blip-image-captioning-base"
    blip_processor = BlipProcessor.from_pretrained(model_id)
    # float32 keeps CPU inference simple and numerically safe on edge devices.
    blip_model = BlipForConditionalGeneration.from_pretrained(
        model_id,
        torch_dtype=torch.float32,
    )
    return blip_processor, blip_model
def generate_image_via_api(prompt):
    """Generate an image from *prompt* via the Hugging Face Inference API.

    Args:
        prompt: Natural-language description of the image to generate.

    Returns:
        PIL.Image.Image: The decoded generated image.

    Raises:
        RuntimeError: If the API responds with a non-200 status, or with a
            body that is neither decodable image bytes nor recognised JSON.
        requests.exceptions.RequestException: On network failure or timeout.
    """
    api_url = ("https://api-inference.huggingface.co/models/"
               "black-forest-labs/FLUX.1-dev")
    headers = {"Authorization": f"Bearer {hf_token}"}
    payload = {"inputs": prompt}
    # Fix: the original call had no timeout, so a stalled connection would
    # hang the Streamlit app indefinitely. Generation can be slow, hence 120s.
    response = requests.post(api_url, headers=headers, json=payload,
                             timeout=120)
    if response.status_code != 200:
        raise RuntimeError(
            f"Image API Error: {response.status_code} - {response.text}"
        )
    # The API normally returns raw image bytes in the response body.
    try:
        img = Image.open(BytesIO(response.content))
        # Image.open is lazy; force the decode now so a corrupt payload
        # fails here (and triggers the JSON fallback) rather than later.
        img.load()
        return img
    except Exception:
        pass
    # Fallback: some deployments return JSON with a base64-encoded image.
    # Fix: the original called response.json() unguarded, so a non-JSON body
    # raised JSONDecodeError and masked the real problem.
    try:
        result = response.json()
    except ValueError as exc:
        raise RuntimeError(
            "API returned a body that is neither an image nor valid JSON"
        ) from exc
    if isinstance(result, dict) and "image" in result:
        img_data = base64.b64decode(result["image"])
        return Image.open(BytesIO(img_data))
    raise RuntimeError(
        f"API returned unexpected format: {result}"
    )
tab1, tab2 = st.tabs(["Image Captioning", "Text-to-Image Generation"])


@st.cache_resource
def _load_ocr_reader():
    """Load and cache the EasyOCR English reader.

    Fix: the original code constructed ``easyocr.Reader`` on every uploaded
    image, re-loading the OCR models each time; caching it (like the BLIP
    model) makes repeat uploads fast.
    """
    return easyocr.Reader(['en'], gpu=False)


with tab1:
    st.header("Image Captioning")
    uploaded_file = st.file_uploader(
        "Upload an image",
        type=["jpg", "jpeg", "png"]
    )
    if uploaded_file:
        image = Image.open(uploaded_file).convert("RGB")
        st.image(image, caption="Uploaded Image", use_column_width=True)

        # Caption with BLIP; inference only, so disable autograd.
        processor, model = load_blip()
        inputs = processor(image, return_tensors="pt")
        with torch.no_grad():
            out = model.generate(**inputs)
        caption = processor.decode(out[0], skip_special_tokens=True)
        st.success(f"**Caption:** {caption}")

        # OCR: keep only confident (>0.5) purely-alphabetic tokens longer
        # than 2 characters, to filter out logo/artifact noise.
        st.write("Extracting text from image...")
        reader = _load_ocr_reader()
        image_np = np.array(image)
        ocr_result = reader.readtext(image_np)
        filtered_text = [
            item[1] for item in ocr_result
            if (item[2] > 0.5 and len(item[1].strip()) > 2 and
                item[1].isalpha())
        ]
        extracted_text = " ".join(filtered_text)
        if extracted_text:
            st.info(f"**Extracted Text:** {extracted_text}")

        # Summarise caption (+ OCR text when present) with BART; if the
        # model call fails, fall back to a static template summary.
        st.write("Generating dynamic summary with open-source LLM...")
        try:
            if extracted_text:
                summary_input = f"{caption}. {extracted_text}"
            else:
                summary_input = caption
            output = summarizer(summary_input)[0]["summary_text"].strip()
            st.info(f"**Summary:** {output}")
        except Exception as e:
            st.warning(f"Summary generation failed: {e}")
            if extracted_text:
                summary = (f"This image contains: {caption}. "
                           f"The following text is present: {extracted_text}")
            else:
                summary = (f"This image contains: {caption}. "
                           f"No readable text was detected.")
            st.info(f"**Summary:** {summary}")
with tab2:
    st.header("Text-to-Image Generation")
    prompt = st.text_area("Enter a prompt to generate an image:")
    # Only fire the API call once the button is pressed AND the prompt
    # box is non-empty.
    if st.button("Generate Image") and prompt:
        with st.spinner("Generating image via Hugging Face API..."):
            try:
                generated = generate_image_via_api(prompt)
                st.image(
                    generated,
                    caption="Generated Image",
                    use_column_width=True,
                )
            except Exception as e:
                # Surface any API/network/display failure inline in the UI.
                st.error(f"Image generation failed: {e}")