// bring in the Hugging Face Inference client (the HfInference class) from a CDN
import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/inference@2.7.0/+esm';
// CONNECT TO HF API
// replace the HF_TOKEN value with your own
const HF_TOKEN = "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";
const inference = new HfInference(HF_TOKEN);
var promptArray = [] // a list of the filled-in prompts sent to the models
var PROMPT_INPUT = `` // a string holding the prompt template you write or change
var promptField // an html element to hold the prompt
// var outText, outPics, outCaption, outInfo // html elements to hold the results
var blanksArray = [] // an empty list to store all the variables we enter to modify the prompt
var modelOutput, resText, resImg // empty variables to store model results
//// models section ////
// Text-to-Image GROUP TASK
async function textImgGroupTask(pArray){
let MODEL = 'black-forest-labs/FLUX.1-dev'
let resultArray = []
for (let p in pArray){
const blobImg = await inference.textToImage({
model: MODEL,
inputs: pArray[p],
parameters: {
// negative_prompt: "blurry text",
guidance_scale: 3.5,
height: 512,
width: 512,
},
})
const url = URL.createObjectURL(blobImg) // make a local URL for the returned image blob (createObjectURL is synchronous, no await needed)
resultArray.push(url)
}
console.log(resultArray)
return [resultArray, MODEL]
}
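// A quick way to try this function on its own (a sketch, not part of the main flow): it resolves
// to [arrayOfImageURLs, modelName], so you could temporarily add a test call such as
//   textImgGroupTask(["The doctor is wearing a labcoat"]).then(([urls, model]) => console.log(urls, model))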
// Chat-Completion TASK, array of prompts
async function chatCompGroupTask(pArray){
let MODEL = 'HuggingFaceH4/zephyr-7b-beta'
// let MODEL = "mistralai/Mistral-7B-Instruct-v0.2"
// let MODEL = "gpt2"
let resultArray = []
for (let p in pArray){
const chatTask = await inference.chatCompletion({
model: MODEL,
messages: [{role: "user", content: pArray[p]}],
max_tokens: 100
});
var result = chatTask.choices[0].message;
resText = result.content
resultArray.push(resText)
}
console.log(resultArray)
return [resultArray, MODEL]
}
// Chat-Completion TASK, one prompt only
async function chatCompTask(prompt){
// let MODEL = 'HuggingFaceH4/zephyr-7b-beta'
let MODEL = "mistralai/Mistral-7B-Instruct-v0.2"
const chatTask = await inference.chatCompletion({
model: MODEL,
messages: [{role: "user", content: prompt}], // use if no preprompt
// messages: [{role: "system", content: PREPROMPT},{ role: "user", content: prompt }], // can use this 'system' role to add instructions to the chatbot in a preprompt
max_tokens: 150
});
var result = chatTask.choices[0].message;
resText = result.content
return resText
}
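// The commented-out 'system' message above refers to a PREPROMPT that is not defined in this file.
// A minimal sketch if you want to try it (the wording here is only an example):
// const PREPROMPT = `Answer in one short paragraph, describing the person and their surroundings.`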
// Text-Generation TASK: some text-generating models use this API endpoint instead of the chat-completion endpoint
async function textGenTask(prompt){
// let MODEL = 'HuggingFaceH4/zephyr-7b-beta'
let MODEL = 'Xenova/bloom-560m'
const output = await inference.textGeneration({
model: MODEL,
inputs: prompt,
parameters: { max_new_tokens: 150 } // this endpoint takes max_new_tokens inside parameters
})
var resText = output.generated_text // the client returns a single object, not an array
console.log(resText)
return resText
}
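// Note: textGenTask is defined here for comparison but is not called in displayOutput below. As a
// sketch, you could swap it in for a single prompt, e.g. let oneResult = await textGenTask(promptArray[0]),
// keeping in mind that it returns one string rather than an array of results.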
//// end of models section ////
//// p5.js section, web interface ////
//create an instance of the p5 workspace (instance mode is needed because we import a module)
new p5(function(p5){
p5.setup = function(){
p5.noCanvas()
console.log('p5 instance loaded')
makeTextModules()
makeInputModule()
makeExamplesModule()
makeCritModule()
makeOutputModule()
}
//using blanks you fill in
function makeTextModules(){
const introDiv = p5.createDiv().class('module').id('intro')
p5.createElement('h1','p5.js Critical AI Prompt Battle').parent(introDiv)
p5.createP(`What do AI models really 'know' about you — about your community, your language, your culture? What do they 'know' about different concepts, ideas, and worldviews? This tool lets you compare the results of multiple AI-generated texts and images side-by-side, to explore variations of a prompt. For more info on prompt programming and critical AI, see <a href="">[TUTORIAL-LINK]</a>.`).parent(introDiv)
}
function makeInputModule(){
const inputDiv = p5.createDiv().class('module main').id('inputDiv') // class() takes a single string, so list both class names in it
p5.createElement('h4', 'INPUT').parent(inputDiv)
p5.createElement('h3', 'Create a prompt template').class('header').parent(inputDiv)
p5.createP(`Write a prompt template that includes one wildcard * (asterisk). You will fill in options for this wildcard in the next step. Phrase your prompt so that * highlights a quality you want to investigate, such as appearance, profession, gender, or country of origin. `).parent(inputDiv)
promptField = p5.createInput(PROMPT_INPUT).parent(inputDiv) // turns the string into an input field; access its current text via promptField.value()
promptField.size(600)
p5.createP(promptField.attribute('label')).parent(inputDiv)
promptField.addClass("prompt")
p5.createElement('h3', 'Fill in your blanks').class('header').parent(inputDiv)
p5.createP('Now add three words or phrases in the boxes below. These will replace the * in your prompt when the model runs.').parent(inputDiv)
// p5.createP('(e.g. "beads", "a suit", "her hair down" )').parent(inputDiv)
addField()
addField()
addField()
// press to run model
const submitButton = p5.createButton("RUN PROMPT")
submitButton.size(170)
submitButton.class('button').parent(inputDiv)
submitButton.mousePressed(displayOutput)
}
function makeExamplesModule(){
const examDiv = p5.createDiv().class('module main').id('examples').parent('#inputDiv').style('float:right;margin:0;') // the inputDiv variable is scoped to makeInputModule, so attach by id instead
p5.createElement('h4', 'EXAMPLE PROMPT TEMPLATES').class('header').parent(examDiv)
p5.createP(`The doctor is wearing a *`).parent(examDiv)
p5.createP(`"labcoat", "suit", "headscarf"`).class('prompt').parent(examDiv)
p5.createP(`"The woman is wearing * and is on her way to..."`).parent(examDiv)
p5.createP(`"beads", "a suit", "her hair down"`).class('prompt').parent(examDiv)
p5.createP(`The man is riding a *`).parent(examDiv)
p5.createP(`"horse", "elephant", "motorcycle"`).class('prompt').parent(examDiv)
p5.createP(`The * person was stopped on their way to...`).parent(examDiv)
p5.createP(`"queer", "trans", "straight"`).class('prompt').parent(examDiv)
}
function addField(){
let f = p5.createInput("").parent(inputDiv)
f.class("blank")
blanksArray.push(f)
console.log("made variable field")
}
function makeCritModule(){
const critDiv = p5.createDiv('').class('module main').id('critDiv')
p5.createElement('h4', 'CRITICAL INQUIRY').class('header').parent(critDiv)
p5.createP(`• Notice how the outputs shift with each word choice. What is different in each case that you didn't expect? What environment is the subject in? Are they indoors or outdoors? Who are they around and what are they doing? What tropes are unsurprising? `).parent(critDiv)
p5.createP(`• Try updating your prompt with new * variables to see how your outputs change, or try a new prompt template altogether.`).parent(critDiv)
}
function makeOutputModule(){
const outputDiv = p5.createDiv().class('module').id('outputDiv')
const outHeader = p5.createElement('h4',"OUTPUT").parent(outputDiv)
// make output placeholders
// placeholder DIV for images and captions
p5.createDiv().id('outPics').parent(outputDiv)
p5.createElement('h3', 'Text-to-image output').parent(outputDiv)
p5.createDiv('').id('outCaption').parent('#outPics')
// text-only output
p5.createDiv('').id('outText').parent(outputDiv)
p5.createElement('h3', 'Text output').parent(outputDiv)
// placeholder for about model, prompt, and hyperparams
p5.createDiv('').id('outInfo').parent(outputDiv)
p5.createElement('h3', 'Output info').parent(outputDiv)
}
async function displayOutput(){
console.log('submitButton just pressed')
// Clear output area for next model run
document.querySelector('#outText').innerHTML = ""
document.querySelector('#outInfo').innerHTML = ""
let placeholder = p5.createP("Please wait while all models are rendering").class('prompt').parent('#outPics')
placeholder.style('display', 'inherit') // use style(), not attribute(), to set a CSS property
// GRAB CURRENT FIELD INPUTS FROM PROMPT & BLANKS
PROMPT_INPUT = promptField.value() // grab update to the prompt if it's been changed
console.log("latest prompt: ", PROMPT_INPUT)
// create a list from the values in the blanks fields
let blanksValues = blanksArray.map(b => b.value())
console.log(blanksValues)
// fill in the prompt repeatedly with the values from blanks fields
blanksValues.forEach(b => {
let p = PROMPT_INPUT.replace(`*`,b)
promptArray.push(p)
})
console.log(promptArray)
// RUN IMAGE MODEL
let getOutputPicURLs = await textImgGroupTask(promptArray)
let res = getOutputPicURLs[0]
document.querySelector('#outPics').innerHTML = ""
for (let r in res){
let img = p5.createImg(res[r], promptArray[r]) // (url,alt-text)
img.size(300,300)
img.parent('#outPics')
}
// RUN TEXT MODEL
let getOutputText = await chatCompGroupTask(promptArray)
console.log(getOutputText[0])
//fill in all text outputs
for (let i in getOutputText[0]){
p5.createP(promptArray[i]).class('prompt').parent('#outText')
p5.createP(getOutputText[0][i]).parent('#outText') // createP just takes the text to display
}
// DISPLAY MODEL NAME AND OTHER INFO
p5.createP("Text-to-Image Model: " + getOutputPicURLs[1]).parent('#outInfo')
p5.createP("Text-Generating Model: " + getOutputText[1]).parent('#outInfo')
// CLEAR VALUES FOR NEXT RUN
// placeholder.innerHTML = ''
promptArray = [] // reset the prompts list; keep blanksArray, since it holds the input field elements for the next run
PROMPT_INPUT = ``
}
})