diff --git a/constants/endpoints-constants.js b/constants/endpoints-constants.js
index d22b315..35959c1 100644
--- a/constants/endpoints-constants.js
+++ b/constants/endpoints-constants.js
@@ -1,5 +1,14 @@
 export const appUrl = `https://local.webaverse.com`;
-export const weaviateUrl = `https://weaviate.weabaverse.com`;
-export const stableDiffusionUrl = `https://stable-diffusion.webaverse.com`;
+export const gpt3Url = `https://gpt3.webaverse.com`; // create
 export const voiceUrl = `https://voice-cw.webaverse.com`;
-export const diffsoundUrl = `https://diffsound.webaverse.com`;
\ No newline at end of file
+export const diffsoundUrl = `https://diffsound.webaverse.com`;
+export const motionDiffusionUrl = `https://motion-diffusion.webaverse.com`; // create
+export const stableDreamfusionUrl = `https://stable-dreamfusion.webaverse.com`; // create
+export const get3dUrl = `https://get-3d.webaverse.com`; // create
+export const musicGeneratorUrl = `https://music-generator.webaverse.com`; // create
+export const discoDiffusionUrl = `https://disco-diffusion.webaverse.com`; // create
+// coreweave
+export const pixelArtUrl = `https://pixel-art.webaverse.com`;
+export const weaviateUrl = `https://weaviate.webaverse.com`;
+export const stableDiffusionUrl = `https://stable-diffusion.webaverse.com`;
+export const blipUrl = `https://blip.webaverse.com`;
diff --git a/generators/generator.js b/generators/generator.js
new file mode 100644
index 0000000..c6536ab
--- /dev/null
+++ b/generators/generator.js
@@ -0,0 +1,81 @@
+// Generator helpers that build URLs for / fetch from the Webaverse AI endpoints.
+import fetch from 'node-fetch'
+import {
+  voiceUrl,
+  stableDiffusionUrl,
+  diffsoundUrl,
+  pixelArtUrl,
+  blipUrl,
+} from '../constants/endpoints-constants'
+
+// Resolves to the tiktalknet tts URL for transcript `s` and optional `voice` id.
+export const generateVoice = () => async ({s, voice} = {}) => {
+  return `${voiceUrl}/tts?s=${encodeURIComponent(s)}&voice=${encodeURIComponent(voice ?? '')}`
+}
+
+// Fetch a stable-diffusion image described by "`prefix` `description`".
+// Resolves to an ArrayBuffer; throws on HTTP error or an empty payload.
+export const generateImage = ({
+  modelName,
+  prefix,
+}) => async ({
+  name,
+  description,
+} = {}) => {
+  const s = `${prefix} ${description}`
+  const u = `${stableDiffusionUrl}/image?s=${encodeURIComponent(s)}&model=${modelName}`
+  const res = await fetch(u)
+  if (!res.ok) {
+    throw new Error(`invalid status: ${res.status}`)
+  }
+  const arrayBuffer = await res.arrayBuffer()
+  if (arrayBuffer.byteLength === 0) {
+    throw new Error(`generated empty image`)
+  }
+  return arrayBuffer
+}
+
+// Resolves to the diffsound URL for prompt `s`.
+export const generateDiffSound = () => async ({s} = {}) => {
+  return `${diffsoundUrl}/sound?s=${encodeURIComponent(s)}`
+}
+
+// Kick off a pixel-art job, wait for it to finish, then download the result bytes.
+export const generatePixelArt = () => async () => {
+  const delay = ms => new Promise(res => setTimeout(res, ms))
+  const generate = `${pixelArtUrl}/generate?steps=25&seed=30&s=${encodeURIComponent('snowy mountains')}`
+  const {id: queryId} = await fetch(generate).then(r => r.json())
+  if (!queryId) {
+    throw new Error(`generate did not return a job id`)
+  }
+  // No readiness endpoint on the backend; generation takes up to a minute.
+  await delay(50000)
+  const res = await fetch(`${pixelArtUrl}/generate_result?query_id=${queryId}`)
+  if (!res.ok) {
+    throw new Error(`invalid status: ${res.status}`)
+  }
+  const arrayBuffer = await res.arrayBuffer()
+  if (arrayBuffer.byteLength === 0) {
+    throw new Error(`generated empty image`)
+  }
+  return arrayBuffer
+}
+
+// Ask BLIP to caption the image at url `s`; resolves to the parsed JSON response.
+export const generateBlipResult = () => async ({s} = {}) => {
+  const u = `${blipUrl}/upload/url?task=image_captioning&img_url=${encodeURIComponent(s)}`
+  // NOTE(review): Access-Control-* are *response* headers and `mode` is a
+  // browser option; both are no-ops under node-fetch — confirm they're needed.
+  const res = await fetch(u, {
+    mode: 'no-cors',
+    headers: {
+      'Access-Control-Allow-Origin': '*',
+      'Access-Control-Allow-Headers': '*',
+      'Cross-Origin-Embedder-Policy': 'same-origin'
+    }
+  })
+  if (!res.ok) {
+    throw new Error(`invalid status: ${res.status}`)
+  }
+  return await res.json()
+}
diff --git a/pages/generators.js b/pages/generators.js
new file mode 100644
index 0000000..51c1b0c
--- /dev/null
+++ b/pages/generators.js
@@ -0,0 +1,292 @@
+import Head from 'next/head'
+import {useState} from 'react'
+import styles from '../styles/Home.module.css'
+import {
+ generateVoice,
+ generateImage,
+ generateDiffSound,
+ generatePixelArt,
+ generateBlipResult,
+} from '../generators/generator'
+
+// import Reader from 'riff-wave-reader/lib/reader'
+
+export default function Generators() {
+ const [loadingVoice, setLoadingVoice] = useState(false)
+ const [generatedVoice, setGeneratedVoice] = useState(null)
+ const [transcript, setTranscript] = useState('')
+ const [voice, setVoice] = useState('')
+
+ const [loadingImage, setLoadingImage] = useState(false)
+ const [generatedImage, setGeneratedImage] = useState(null)
+
+ const [loadingSound, setLoadingSound] = useState(false)
+ const [generatedSound, setGeneratedSound] = useState(null)
+ const [sound, setSound] = useState('')
+
+ const [loadingPixelArt, setLoadingPixelArt] = useState(false)
+ const [generatedPixelArt, setGeneratedPixelArt] = useState(null)
+
+ const [loadingBlip, setLoadingBlip] = useState(false)
+ const [generatedBlip, setGeneratedBlip] = useState('')
+ const [blipImageUrl, setBlipImageUrl] = useState('')
+
+  // generateVoice — controlled-input handlers for the transcript and voice id
+  const handleTranscript = (event) => {
+    setTranscript(event.target.value)
+  }
+  const handleVoice = (event) => {
+    setVoice(event.target.value)
+  }
+  async function generateTestVoice() {
+    setLoadingVoice(true)
+    // generateVoice() resolves to the tts endpoint URL; fetch it for the audio bytes
+    const ttsUrl = await generateVoice()({s: transcript, voice})
+    const blob = new Blob([await (await fetch(ttsUrl)).arrayBuffer()])
+    const audioFromBlob = URL.createObjectURL(blob)
+    setGeneratedVoice(audioFromBlob)
+    setLoadingVoice(false)
+  }
+
+  // generateImage
+  async function generateTestImage() {
+    setLoadingImage(true)
+    const description = 'test generate image'
+    // generateImage takes {modelName, prefix}; the returned function takes {description}
+    const makeImage = generateImage({
+      modelName: null,
+      prefix: 'test',
+    })
+    const imgArrayBuffer = await makeImage({description})
+    const blob = new Blob([imgArrayBuffer], {
+      type: 'image/png',
+    })
+    const image = URL.createObjectURL(blob)
+    setGeneratedImage(image)
+    setLoadingImage(false)
+  }
+
+  // generateDiffSound
+  const handleSound = e => {
+    setSound(e.target.value)
+  }
+  async function generateTestDiffSound() {
+    setLoadingSound(true)
+    // generateDiffSound resolves to the sound endpoint URL, not the audio bytes
+    const soundUrl = await generateDiffSound()({s: sound})
+    const blob = new Blob([await (await fetch(soundUrl)).arrayBuffer()])
+    const soundFromBlob = URL.createObjectURL(blob)
+    setGeneratedSound(soundFromBlob)
+    setLoadingSound(false)
+  }
+
+  // generate pixel art
+  async function generateTestPixelArt() {
+    setLoadingPixelArt(true)
+    // run the generator (it polls the backend; can take up to a minute)
+    const runPixelArt = generatePixelArt()
+    const pixelArtBytes = await runPixelArt()
+    // wrap the raw bytes so an image src can display them
+    const blob = new Blob([pixelArtBytes], {type: 'image/png'})
+    const objectUrl = URL.createObjectURL(blob)
+    setGeneratedPixelArt(objectUrl)
+    setLoadingPixelArt(false)
+  }
+
+  // generate BLIP result
+  const handleBlip = e => {
+    setBlipImageUrl(e.target.value)
+  }
+  async function generateBlip() {
+    setLoadingBlip(true)
+    // resolves to the BLIP captioning response for the given image url
+    const result = await generateBlipResult()({s: blipImageUrl})
+    setGeneratedBlip(result)
+    setLoadingBlip(false)
+  }
+
+ // TODO styling!!
+ return (
+
+
+
Test - Generators
+
+
+
+
+ Generate voice using tiktalknet
+ Endpoints:
+
/tts?s={"{s}"}&voice={"{voice}"}
+
+ - s (string): text to convert
+ - voice (string | optional): the id of the voice to use
+
+
+
+ {loadingVoice &&
+
+
+
+
+
+
+
+ }
+ {!loadingVoice && !generatedVoice && No voice data
}
+ {!loadingVoice && generatedVoice &&
+
+ }
+
+ Generate image using Stable Diffusion
+ Endpoints:
+
/image?s={"{s}"}&model={"{model}"}
+
+ - s (string): image url
+ - model (string | optional): the id of the model to use
+
+
+
+ {loadingImage && Loading...
}
+ {!loadingImage && !generatedImage && No image data
}
+ {!loadingImage && generatedImage &&
+
+
+

+
+
+ }
+
+ Generate sound with Diffsound (AWS)
+ Endpoints:
+
/sound?s={"{s}"}
+
+ - s (string): text to convert
+
+
+
+ {loadingSound &&
+
+
+
+
+
+ }
+ {!loadingSound && !generatedSound && No sound data
}
+ {!loadingSound && generatedSound &&
+
+ }
+
+ Generate Pixel Art (AWS)
+ Endpoints:
+
/generate : kick off a new image generation job and add it to the backlog. returns the id of the job
+
/generate_result : retrieve
+
/prompt_tags : returns the current tags added to prompts
+
+
+ {loadingPixelArt && Loading, can take up to one minute...
}
+ {!loadingPixelArt && !generatedPixelArt && No Pixel Art data
}
+ {!loadingPixelArt && generatedPixelArt &&
+
+
+

+
+
+ }
+
+ Generate image captioning using BLIP
+ Endpoints:
+
POST /upload
+
+ - FormData task (string | optional): the task to run (image_captioning, vqa, feature_extraction or text_matching)
+ - FormData file (file): the image to get the text caption
+
+
POST /upload/url
+
Body:
+
+ {"{"}
+ "task": {""},
+ "file": {""}
+ {"}"}
+
+
+ - Body task (string | optional): the task to run (image_captioning, vqa, feature_extraction or text_matching)
+ - Body file (string): the image url to get the text caption
+
+
+
+ {loadingBlip &&
+
+
+
+
+
+ }
+ {!loadingBlip && !generatedBlip && No BLIP data
}
+ {!loadingBlip && generatedBlip &&
+
+
+ {generatedBlip}
+
+
+ }
+
+
+
+ )
+}