// (removed stray editor residue — the bare identifier would throw a ReferenceError at runtime)
// This workflow expects
// - models/checkpoints/sdxl_lightning_2step.safetensors
// - models/controlnet/t2i-adapter-depth-midas-sdxl-1.0.fp16.safetensors
// to be available on the ComfyUI server
// Example based on
// - https://comfyanonymous.github.io/ComfyUI_examples/controlnet/#t2i-adapter-vs-controlnets
// ComfyUI workflow graph loaded from workflow_api.json (API format)
let workflow;
// ComfyUiP5Helper instance that talks to the ComfyUI server
let comfy;
// Depth-map image sent into the workflow as the control input
let srcImg;
// Most recent generated image (undefined until the first result arrives)
let resImg;
function preload() {
  // Fetch the depth-map input and the API-format workflow before
  // setup() runs, so both are guaranteed to be ready at start.
  srcImg = loadImage("shark_depthmap_1024.png");
  workflow = loadJSON("workflow_api.json");
}
function setup() {
  // Log the workflow loaded in preload() for debugging.
  console.log("workflow is", workflow);
  createCanvas(512, 512);
  // SDXL operates on 1024x1024, so render the 512x512 canvas at 2x density.
  pixelDensity(2);
  // Helper object that handles communication with the ComfyUI server.
  comfy = new ComfyUiP5Helper("https://gpu1.gohai.xyz:8188/");
}
function requestImage() {
  // Submit one generation request to the ComfyUI server.
  // Attach the depth map as the image input of node 20 in the workflow.
  workflow[20] = comfy.image(srcImg);
  // Randomize the sampler seed (node 3); the seed determines the noisy
  // latent the generation starts out from. ComfyUI expects an integer
  // seed, so floor the float that p5's random() returns.
  workflow[3].inputs.seed = floor(random(9999999));
  // The textual prompt could also be changed per request:
  //workflow[24].inputs.text = "fungi, moss, spores, underwater background";
  comfy.run(workflow, gotImage);
}
// p5 event hook: kick off a new generation on every canvas click.
function mousePressed() {
requestImage();
}
function gotImage(data, err) {
  // Callback invoked by comfy.run() when the workflow finishes.
  // data is an array of outputs from running the workflow.
  if (err) {
    // Surface failures instead of silently ignoring the err argument.
    console.error("gotImage error", err);
    return;
  }
  console.log("gotImage", data);
  // Load the first output image (if any) so draw() can display it.
  if (data.length > 0) {
    resImg = loadImage(data[0].src);
  }
  // we could automatically run again if we wanted
  //requestImage();
}
function draw() {
  // Clear to white every frame.
  background(255);
  // Nothing to show until the first generation has come back.
  if (!resImg) {
    return;
  }
  // Scale the 1024x1024 result onto the (density-2) 512x512 canvas.
  image(resImg, 0, 0, width, height);
}