# This file is part of Sympathy for Data.
# Copyright (c) 2025, Combine Control Systems AB
#
# SYMPATHY FOR DATA COMMERCIAL LICENSE
# You should have received a link to the License with Sympathy for Data.
from sympathy.api import node
from sympathy.api import ParameterView
from sympathy.api.nodeconfig import Port, Ports, Tag, Tags
from PySide6.QtWidgets import QLabel, QVBoxLayout
from sympathy.utils import parameters as utils_parameters
from sympathy.utils import credentials as utils_credentials
from sympathy.api.exceptions import SyConfigurationError
from openai import OpenAI
from anthropic import Anthropic
# Disclaimer fragments joined into one wrapped label by CustomConfig and
# shown underneath the parameter GUI.
third_party_notice = [
    "Note: Using LLM APIs requires third-party integration,",
    "sharing the provided data with the service.",
    "LLMs may generate inaccuracies, unintended outputs,",
    "or misleading information, so verification is advised. ",
]
class CustomConfig(ParameterView):
    """Configuration view that shows the standard parameter GUI with a
    small gray third-party/LLM disclaimer appended underneath."""

    def __init__(self, parameters, parent=None):
        super().__init__(parent=parent)
        disclaimer = QLabel(" ".join(third_party_notice))
        # Wrap the long notice text instead of widening the dialog.
        disclaimer.setWordWrap(True)
        disclaimer.setStyleSheet("color: gray; font-size: 10px;")
        box = QVBoxLayout()
        box.addWidget(parameters.gui())
        box.addWidget(disclaimer)
        self.setLayout(box)
# NOTE(review): removed a stray "[docs]" line here — it appears to be a
# Sphinx-rendered-page artifact and would raise NameError at import time.
class Llm(node.Node):
    """
    This node is used to connect your flow with a Large Language Model (LLM)
    API. Different API's can be used according to preference and availability.
    The prompt is entered in the node's configuration and the response from the
    LLM is provided on the output port. Additional data can also be provided on
    the :ref:`optional input port<node_section_ports>`.

    API key
    =======

    You have to provide your own API key to use this node.
    Read more about how to obtain API keys on the services' websites:

    * OpenAI: https://platform.openai.com/docs/overview
    * Anthropic: https://docs.anthropic.com/en/home

    .. caution::

       As with any other secrets, storing the API key directly in the node
       configuration is discouraged. Instead use
       :ref:`credentials<preferences_credentials>` to store the key securely.
    """
    name = 'Large Language Model API'
    description = 'Connect to a Large Language Model API using an API key'
    nodeid = 'org.sysess.sympathy.machinelearning.llm'
    icon = 'neural_net_brain.svg'
    tags = Tags(Tag.MachineLearning.Apply)

    # Zero-or-one optional text input carrying data for the prompt.
    inputs = Ports([
        Port.Custom(
            port_type='text',
            description='Optional data sent to the API.',
            name='data',
            n=(0, None, 0)
        ),
    ])
    outputs = Ports([
        Port.Custom(
            port_type='text',
            description='Received response from the API',
            name='response',
            n=(1, 1, 1)
        ),
    ])

    parameters = node.parameters()
    parameters.set_list(
        name="service",
        label='Service',
        description=("Select the LLM API service you want to use. "
                     "You need to provide your own API key."),
        plist=['OpenAI', 'Anthropic'],
    )
    utils_parameters.set_secret_connection(
        parameters,
        name="secret",
        label="Key",
        description='Your API key from the API service you have chosen')
    parameters.set_list(
        name="prompt_mode",
        label='Answer Style',
        description=("Sets the LLMs answer tone to "
                     "abide rules for output. "
                     "Use strict for cleaner data output."),
        list=['normal', 'strict', 'none'])
    parameters.set_string(
        name='prompt',
        value='Say hello',
        label='Prompt',
        description="Enter prompt to be sent to the API",
        editor=node.editors.textedit_editor())

    def execute(self, node_context):
        """Send the configured prompt (plus optional input data) to the
        selected LLM service and write the reply to the output port.

        Raises SyConfigurationError if the credential is not in secret
        mode or the selected service is unknown.
        """
        # Optional input data: use the first connected 'data' port, if any.
        input_group = node_context.input.group("data")
        data = input_group[0].get() if input_group else ""

        # Configured parameters.
        service = node_context.parameters['service'].selected
        prompt = node_context.parameters['prompt'].value
        prompt_mode = node_context.parameters['prompt_mode'].selected

        # Resolve the API key from the secret credential resource.
        connection = node_context.parameters['secret'].value
        resource = connection.resource
        mode = connection.credentials.mode  # Secret/login/azure
        if mode != utils_credentials.secrets_mode:
            # Raise instead of assert: asserts are stripped under -O, and a
            # wrongly configured credential is a user error, not a bug.
            raise SyConfigurationError(
                'The credential for this node must use secret mode.')
        secrets = utils_credentials.get_secret_credentials(self, connection)
        key = utils_credentials.expand_secrets(resource, secrets)

        if service == 'OpenAI':
            llm_api = OpenaiAPI(api_key=key, mode=prompt_mode, data=data)
        elif service == 'Anthropic':
            llm_api = AnthropicAPI(api_key=key, mode=prompt_mode, data=data)
        else:
            raise SyConfigurationError(f"Unknown service: {service}")

        # Retrieve the response from the LLM API and write it to the
        # output port.
        node_context.output['response'].set(llm_api.get_text_response(prompt))

    def exec_parameter_view(self, node_context):
        """Return the custom configuration GUI with third-party notice."""
        return CustomConfig(node_context.parameters)
class APIBase:
    """
    Base class for LLM API wrappers.

    Stores the API key, the selected answer-style mode and any optional
    input data. Subclasses create the actual service client and implement
    ``get_text_response``.
    """

    # System prompts keyed by the node's 'prompt_mode' parameter value.
    dev_prompts = {
        "normal": """
        [Instructions to behave and follow]
        You are a helpful expert at managing data and to follow
        instructions.
        You will provide well balanced answers and when data is
        provided you will be very careful to make sure you provide
        the correct data.
        IF given a data analysis task you make sure to only give
        the data and no unnecessary information, just pure data.
        If no data is provided, don't mention it.
        Data formats can be tabular, csv, xlsx, xls, json or text.
        Make sure to give appropriate data analysis answers with
        those formats.
        Help the user with datascience tasks!
        """,
        "strict": """
        [Instructions to follow strictly]
        You are a helpful expert at managing data and to follow
        instructions.
        You will provide well correct logical answers!
        IF data is provided you will be very careful to make sure
        the data is kept correct and instructions to change the data
        should be carefully done.
        When given a data analysis task you make sure to only give
        the data and no unnecessary information, just pure data.
        If no data is provided, don't mention it.
        Any other task than converting or changing data is
        strictly prohibited!
        Data formats can be tabular, csv, xlsx, xls, json or text.
        Make sure to give appropriate data analysis answers with
        those formats.
        If the output data is required to be tabular, make it as an
        nd array in python!
        Help the user with datascience tasks!
        """,
        "none": """
        Help the user with their questions!
        """
    }

    def __init__(self, api_key: str, mode: str = "normal", data: str = ""):
        # Keep the key available on the instance; previously it was
        # accepted but silently discarded by the base class.
        self.api_key = api_key
        self.mode = mode
        self.data = data

    def get_text_response(self, prompt) -> str:
        """
        Return the text response from the LLM API.

        Must be overridden in subclasses.
        """
        raise NotImplementedError(
            "This method should be overridden in subclasses.")

    def get_system_prompt(self, mode: str) -> str:
        """
        Return the system prompt for *mode*.

        Raises KeyError if *mode* is not a known prompt mode.
        """
        return self.dev_prompts[mode]
class OpenaiAPI(APIBase):
    """
    OpenAI API wrapper for the LLM API.
    """

    def __init__(self, api_key: str, mode: str = "normal", data=""):
        super().__init__(api_key, mode, data)
        self.client = OpenAI(api_key=api_key)

    def get_text_response(self, prompt) -> str:
        """Send *prompt* (plus optional data) to OpenAI chat completions
        and return the reply text."""
        messages = [
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": self.get_system_prompt(self.mode)
                    }
                ]
            },
            {
                "role": "user",
                "content": prompt,
            },
        ]
        # Only attach the data message when data was actually provided;
        # an empty "[Data to be handled/used]:" block would otherwise be
        # sent and could mislead the model.
        if self.data:
            messages.append({
                "role": "user",
                "content": "[Data to be handled/used]:\n" + self.data
            })
        completion = self.client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            response_format={
                "type": "text"
            },
        )
        return completion.choices[0].message.content
class AnthropicAPI(APIBase):
    """
    Anthropic API wrapper for the LLM API.
    """

    def __init__(self, api_key: str, mode: str = "normal", data=""):
        super().__init__(api_key, mode, data)
        self.client = Anthropic(api_key=api_key)

    def get_text_response(self, prompt) -> str:
        """Send *prompt* (plus optional data) to the Anthropic Messages
        API and return the reply text."""
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": prompt
                    }
                ]
            },
        ]
        # Only attach the data message when data was actually provided;
        # an empty "[Data to be handled/used]:" block would otherwise be
        # sent and could mislead the model.
        if self.data:
            messages.append({
                "role": "user",
                "content": "[Data to be handled/used]:\n" + self.data
            })
        message = self.client.messages.create(
            model="claude-3-5-haiku-latest",
            max_tokens=1000,
            temperature=1,
            system=self.get_system_prompt(self.mode),
            messages=messages
        )
        return message.content[0].text