---
title: Connect to LLM Backend
type: plugins
category: Automation
cat: automation
order: 40
meta_title: Connect to LLM Backend
meta_description: Sends prompts to an LLM URL
---
!!! note
For information about modifying this plugin or creating your own custom plugins, see [Customize and Build Your Own Plugins](custom).
For general plugin information, see [Plugins for projects](/guide/plugins) and [Plugin FAQ](faq).
## About
This plugin connects to an open LLM endpoint, allowing you to generate responses as part of the annotation workflow.

## Plugin
Before using this, replace the `MY_URL_ROOT` value with a URL that does not require authentication. This would typically be an internal service or an LLM behind a proxy.
```javascript
// Expose the Label Studio Interface globally so the helper functions below can use it
window.LSI = LSI;
// Replace with the root URL of your open (no-auth) LLM endpoint before use
const baseUrl = "MY_URL_ROOT";
/**
 * Makes a request to the configured LLM endpoint with the given prompt.
 *
 * @param {string} prompt - The prompt text to send to the LLM.
 * @returns {Promise<Object>} The parsed JSON body of the LLM response.
 * @throws {Error} If the network request fails or returns a non-OK status.
 */
async function fetchLLM(prompt) {
  const params = {
    prompt,
    llm_endpoint_name: "chatgpt",
    redteam_categories: ["cat1"],
  };
  const searchParams = new URLSearchParams(params).toString();
  const url = `${baseUrl}?${searchParams}`;
  const response = await fetch(url, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      // No auth needed because the API is open
    },
  });
  // Surface HTTP-level failures so the caller's try/catch can show them in a modal
  if (!response.ok) {
    throw new Error(`Request failed with status ${response.status}`);
  }
  // BUG FIX: the original parsed the body but never returned it,
  // so callers always received undefined.
  return response.json();
}
/**
 * Sends the introduced prompt to the LLM endpoint and attaches the
 * returned results (response text, category, reason) to the annotation.
 *
 * Reads the "prompt" TextArea from the labeling config, calls the LLM,
 * and deserializes the results into the current annotation.
 *
 * @returns {Promise<boolean|undefined>} `false` on validation or fetch
 *   failure; otherwise resolves after results are attached.
 */
async function sendPrompt() {
  const promptTag = LSI.annotation.names.get("prompt");
  promptTag.submitChanges();
  const prompt = promptTag.result?.value.text.join("\n");
  if (!prompt) {
    Htx.showModal("The prompt is empty", "error");
    return false;
  }
  let response;
  try {
    response = await fetchLLM(prompt);
  } catch (error) {
    Htx.showModal(
      `Error fetching the LLM endpoint "${baseUrl}": ${error.message}`,
      "error",
    );
    return false;
  }
  const results = [];
  const llmResponse = response.LLM_response;
  if (llmResponse) {
    const llmResult = {
      from_name: "response",
      to_name: "placeholder",
      type: "textarea",
      // BUG FIX: the original pushed an empty text array, so the LLM
      // response was never actually attached to the annotation.
      value: { text: [llmResponse] },
    };
    results.push(llmResult);
  }
  const category = response.Category?.category;
  if (category?.length) {
    const attackResult = {
      from_name: "category",
      to_name: "placeholder",
      type: "choices",
      value: { choices: category },
    };
    results.push(attackResult);
  }
  const reasonText = response.Type?.reason;
  if (reasonText) {
    const reasonResult = {
      from_name: "reason",
      to_name: "placeholder",
      type: "textarea",
      value: { text: [reasonText] },
    };
    results.push(reasonResult);
  }
  // Attach all collected results to the current annotation
  LSI.annotation.deserializeResults(results);
}
/**
 * Sets up the onClick event of the template to trigger the LLM request.
 *
 * Creates an "Analyze" button and mounts it inside the `.analyzeButton`
 * container expected to exist in the labeling config.
 */
function setup() {
  const container = document.querySelector(".analyzeButton");
  // Guard: the labeling config may not include the expected container;
  // without this check replaceChildren() would throw on null.
  if (!container) {
    console.warn('setup: no element matching ".analyzeButton" found');
    return;
  }
  const button = document.createElement("button");
  button.textContent = "Analyze";
  // Trigger the LLM request when the button is clicked
  button.onclick = sendPrompt;
  // Insert the button into the container, replacing any previous content
  container.replaceChildren(button);
}
// Mount the Analyze button as soon as the plugin loads
setup();
```
**Related LSI instance methods:**
* [.annotation](custom#LSI-annotation)
## Labeling config
!!! info Tip
You can add `value="$text"` to the TextArea parameters of the prompt to pre-fill the text from your data.
```xml
```
**Related tags:**
* [View](/tags/view.html)
* [Style](/tags/style.html)
* [Text](/tags/text.html)
* [Header](/tags/header.html)
* [TextArea](/tags/textarea.html)
* [Choices](/tags/choices.html)
## Sample data
```json
[
{
"data": {
"text": "What is the closest relative to an opossum?"
}
},
{
"data": {
"text": "What is a fun opossum fact?"
}
},
{
"data": {
"text": "Why are opossums cool?"
}
}
]
```