Commit b6a09565 authored by Swetha Lakshmana Murthy

Add graphene_llm_readme_gen with OpenAI folder

parent 5f184dc3
cicd.ai4eu-dev.eu:7444/tutorials/graphene_llm_readme_gen/openai:v1
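# Dockerfile: builds the container image for the OpenAI parameter-selection node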
# Base image with Python 3.8 and pip preinstalled
FROM python:3.8

# System packages needed to build the Python dependencies
RUN apt-get update -y && \
    apt-get install -y python3-pip python3-dev && \
    pip3 install --upgrade pip

# Install the Python dependencies first so this layer is cached
COPY requirements.txt .
RUN pip3 install -r requirements.txt

# Set the container timezone (override with --build-arg TIMEZONE=...)
ARG TIMEZONE=Europe/Berlin
RUN ln -sf /usr/share/zoneinfo/$TIMEZONE /etc/localtime && \
    echo "$TIMEZONE" > /etc/timezone

# Copy the application code and generate the gRPC stubs from the proto file
COPY . /openai
WORKDIR /openai
RUN python3 -m grpc_tools.protoc --python_out=. --proto_path=. --grpc_python_out=. openai_llms.proto

# Run unbuffered so logs appear immediately
ENTRYPOINT ["python3", "-u", "server.py"]
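# app.py: Flask web UI that collects the OpenAI credentials and model parameters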
from flask import Flask, render_template, request, session, redirect, url_for
from flask_wtf import FlaskForm
from flask_bootstrap import Bootstrap
from wtforms.validators import DataRequired
from wtforms import StringField, FloatField, IntegerField, SubmitField
from openai import OpenAI

app = Flask(__name__)

# Parameters collected from the UI, served to downstream nodes via gRPC (see server.py)
parameters = []


class ApiForm(FlaskForm):
    selected_model = StringField('Selected Model', validators=[DataRequired()])
    temperature = FloatField('Temperature', validators=[DataRequired()])
    max_tokens = IntegerField('Max Tokens', validators=[DataRequired()])
    top_p = FloatField('Top P', validators=[DataRequired()])
    frequency_penalty = FloatField('Frequency Penalty', validators=[DataRequired()])
    presence_penalty = FloatField('Presence Penalty', validators=[DataRequired()])
    submit_button = SubmitField('Submit')


def get_model_names(api_key=None, org_id=None):
    """Return the model IDs available to the given OpenAI credentials."""
    try:
        if api_key is None:
            api_key = session.get('api_key')
        if org_id is None:
            org_id = session.get('org_id')
        if api_key and org_id:
            client = OpenAI(organization=org_id, api_key=api_key)
            model_names_metadata = client.models.list()
            return [model.id for model in model_names_metadata.data]
        return []
    except Exception as e:
        print(f"Error fetching model names: {e}")
        return []


@app.route('/', methods=['GET', 'POST'])
def index():
    error_message = None
    if request.method == 'POST':
        # Store the credentials in the session
        session['api_key'] = request.form['api_key']
        session['org_id'] = request.form['org_id']
        model_names = get_model_names(session['api_key'], session['org_id'])
        if model_names:
            return redirect(url_for('select_model'))
        error_message = "Failed to retrieve model names. Please check your API key and organization ID."
        print(error_message)
    return render_template('index.html', error_message=error_message)


@app.route('/select_model', methods=['GET', 'POST'])
def select_model():
    model_form = ApiForm()
    if request.method == 'POST':
        if 'clear_session' in request.form:
            session.pop('api_key', None)
            session.pop('org_id', None)
            return redirect(url_for('index'))
        if 'api_key' not in session or 'org_id' not in session:
            # Session expired or was cleared; ask for the credentials again
            return redirect(url_for('index'))
        parameters.clear()
        parameters.append(session['api_key'])
        parameters.append(session['org_id'])
        parameters.append(request.form['selected_model'])
        parameters.append(model_form.temperature.data)
        parameters.append(model_form.max_tokens.data)
        parameters.append(model_form.top_p.data)
        parameters.append(model_form.frequency_penalty.data)
        parameters.append(model_form.presence_penalty.data)
    model_names = get_model_names()
    return render_template('index.html', model_form=model_form, model_names=model_names)


def get_parameters():
    return parameters


def app_run():
    app.secret_key = "openai_llms"
    Bootstrap(app)
    app.run(host="0.0.0.0", port=8062, debug=False)
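# gRPC test client (file name not shown in the commit view): fetches the
# parameters collected by the web UI from the running service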
import grpc
import logging

import openai_llms_pb2
import openai_llms_pb2_grpc

port = 8061


def run():
    print("Calling OpenAI_LLM_Service_Stub..")
    try:
        with grpc.insecure_channel('localhost:{}'.format(port)) as channel:
            stub = openai_llms_pb2_grpc.OpenAIApiServiceStub(channel)
            ui_request = openai_llms_pb2.Empty()
            # Request whatever parameters the UI has collected so far
            response = stub.get_llm_parameters(ui_request)
            print(response)
    except Exception as e:
        print(e)


if __name__ == '__main__':
    logging.basicConfig()
    run()
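// openai_llms.proto: contract between the UI node and downstream pipeline nodes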
syntax = "proto3";

// Empty request message
message Empty {
}

// Model parameters collected from the web UI
message ModelParameters {
  string api_key = 1;
  string org_id = 2;
  string selected_model = 3;
  float temperature = 4;
  int32 max_tokens = 5;
  float top_p = 6;
  float frequency_penalty = 7;
  float presence_penalty = 8;
}

service OpenAIApiService {
  rpc get_llm_parameters(Empty) returns (ModelParameters);
}
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: openai_llms.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x11openai_llms.proto\"\x07\n\x05\x45mpty\"\xb7\x01\n\x0fModelParameters\x12\x0f\n\x07\x61pi_key\x18\x01 \x01(\t\x12\x0e\n\x06org_id\x18\x02 \x01(\t\x12\x16\n\x0eselected_model\x18\x03 \x01(\t\x12\x13\n\x0btemperature\x18\x04 \x01(\x02\x12\x12\n\nmax_tokens\x18\x05 \x01(\x05\x12\r\n\x05top_p\x18\x06 \x01(\x02\x12\x19\n\x11\x66requency_penalty\x18\x07 \x01(\x02\x12\x18\n\x10presence_penalty\x18\x08 \x01(\x02\x32\x42\n\x10OpenAIApiService\x12.\n\x12get_llm_parameters\x12\x06.Empty\x1a\x10.ModelParametersb\x06proto3')
_EMPTY = DESCRIPTOR.message_types_by_name['Empty']
_MODELPARAMETERS = DESCRIPTOR.message_types_by_name['ModelParameters']
Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), {
'DESCRIPTOR' : _EMPTY,
'__module__' : 'openai_llms_pb2'
# @@protoc_insertion_point(class_scope:Empty)
})
_sym_db.RegisterMessage(Empty)
ModelParameters = _reflection.GeneratedProtocolMessageType('ModelParameters', (_message.Message,), {
'DESCRIPTOR' : _MODELPARAMETERS,
'__module__' : 'openai_llms_pb2'
# @@protoc_insertion_point(class_scope:ModelParameters)
})
_sym_db.RegisterMessage(ModelParameters)
_OPENAIAPISERVICE = DESCRIPTOR.services_by_name['OpenAIApiService']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_EMPTY._serialized_start=21
_EMPTY._serialized_end=28
_MODELPARAMETERS._serialized_start=31
_MODELPARAMETERS._serialized_end=214
_OPENAIAPISERVICE._serialized_start=216
_OPENAIAPISERVICE._serialized_end=282
# @@protoc_insertion_point(module_scope)
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import openai_llms_pb2 as openai__llms__pb2
class OpenAIApiServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.get_llm_parameters = channel.unary_unary(
'/OpenAIApiService/get_llm_parameters',
request_serializer=openai__llms__pb2.Empty.SerializeToString,
response_deserializer=openai__llms__pb2.ModelParameters.FromString,
)
class OpenAIApiServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def get_llm_parameters(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OpenAIApiServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'get_llm_parameters': grpc.unary_unary_rpc_method_handler(
servicer.get_llm_parameters,
request_deserializer=openai__llms__pb2.Empty.FromString,
response_serializer=openai__llms__pb2.ModelParameters.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'OpenAIApiService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class OpenAIApiService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def get_llm_parameters(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/OpenAIApiService/get_llm_parameters',
openai__llms__pb2.Empty.SerializeToString,
openai__llms__pb2.ModelParameters.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
grpcio==1.38.0
grpcio-tools==1.38.0
grpc-interceptor
protobuf==3.16.0
Bootstrap-Flask
Flask
Flask-WTF
flask_session
WTForms
openai
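# server.py: starts the gRPC service and runs the Flask UI in a background thread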
from concurrent import futures
import threading

import grpc

import openai_llms_pb2
import openai_llms_pb2_grpc
from app import app_run, get_parameters


class OpenAIService(openai_llms_pb2_grpc.OpenAIApiServiceServicer):
    def __init__(self):
        super().__init__()
        self.send_data = True

    def get_llm_parameters(self, request, context):
        try:
            params = get_parameters()
            if not params:
                print("Error: Parameters list is empty.")
                return openai_llms_pb2.ModelParameters()
            response = openai_llms_pb2.ModelParameters(
                api_key=params[0],
                org_id=params[1],
                selected_model=params[2],
                temperature=params[3],
                max_tokens=params[4],
                top_p=params[5],
                frequency_penalty=params[6],
                presence_penalty=params[7],
            )
            # Every other call reports NOT_FOUND once the data has been served
            if not self.send_data:
                context.set_code(grpc.StatusCode.NOT_FOUND)
                context.set_details("all data has been processed")
            self.send_data = not self.send_data
            print('Received response \n {}'.format(response))
            return response
        except Exception as e:
            # Report the failure through gRPC instead of returning None,
            # which would crash the handler
            print(e)
            context.set_code(grpc.StatusCode.INTERNAL)
            context.set_details(str(e))
            return openai_llms_pb2.ModelParameters()


def serve(port):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    openai_llms_pb2_grpc.add_OpenAIApiServiceServicer_to_server(OpenAIService(), server)
    server.add_insecure_port("[::]:{}".format(port))
    print("Starting server. Listening on port : " + str(port))
    server.start()
    # Run the Flask UI in a background thread. Pass app_run itself, not
    # app_run(): calling it here would block and the gRPC server would
    # never reach wait_for_termination()
    threading.Thread(target=app_run, daemon=True).start()
    server.wait_for_termination()


if __name__ == "__main__":
    port = 8061
    serve(port)
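<!-- templates/index.html: single template rendering both the credentials page and the parameter-selection page -->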
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>OpenAI Model Selection and Configuration</title>
<style>
body {
font-family: 'Arial', sans-serif;
margin: 40px;
background-color: #f4f4f4; /* Set your desired background color */
}
h1 {
color: #333;
}
form {
max-width: 500px;
margin: 40px 0;
padding: 25px;
border: 1px solid #ccc;
border-radius: 5px;
background-color: #fff; /* Set your desired background color */
}
label {
display: block;
margin-bottom: 5px;
color: #555;
}
input, select {
width: 100%;
padding: 8px;
margin-bottom: 10px;
box-sizing: border-box;
border: 1px solid #ccc;
border-radius: 3px;
}
input[type="submit"] {
background-color: #8dbe8f;
color: white;
cursor: pointer;
}
input[type="submit"]:hover {
background-color: #3c7a3f;
}
p {
margin-top: 10px;
color: #333;
}
p.error {
color: red;
}
p.warning {
color: orange;
}
p.message {
color: rgb(121, 120, 177);
}
</style>
<script>
    window.onload = function () {
        // Reset the form on the page so stale values are not resubmitted
        const form = document.querySelector("form");
        if (form) {
            form.reset();
        }
    }
</script>
</head>
<body>
{% if request.endpoint == 'index' %}
<h1>Enter OpenAI API Keys</h1>
<form method="POST">
<label for="api_key">API Key:</label>
<input type="password" id="api_key" name="api_key" value="{{ api_key_partial }}" required><br><br>
<label for="org_id">Organization ID:</label>
<input type="password" id="org_id" name="org_id" value="{{ org_id_partial }}" required><br><br>
<input type="submit" value="Submit">
{% if error_message %}
<p style="font-size: 12px; color: red;">Error: {{ error_message }}</p>
{% endif %}
</form>
{% elif request.endpoint == 'select_model' %}
<h1>Select LLM Parameters</h1>
<form method="POST" autocomplete="off">
{{ model_form.csrf_token }}
<strong><label for="selected_model">Select Model:</label></strong>
<select name="selected_model" id="selected_model">
{% for model in model_names %}
<option value="{{ model }}">{{ model }}</option>
{% endfor %}
</select>
<br>
<p style="font-size: 12px; color: #757373;">
Please note that the below parameters need not be selected if the model does not require them.
Please refer to the <a href="https://platform.openai.com/docs/api-reference/introduction" target="_blank">OpenAI website</a> for further information.
</p>
<strong><label for="temperature">Temperature:</label></strong>
<input type="number" name="temperature" step="0.1" value="0.1" min="0.1" max="2.0" required>
<br>
<p style="font-size: 12px; color: #757373;">Temperature controls the randomness of the model's output.
A lower value (e.g., 0.1) makes the output more focused and deterministic, while a higher value (e.g., 2.0) makes it more creative and varied.</p>
<strong><label for="max_tokens">Max Tokens:</label></strong>
<input type="number" name="max_tokens" value="256" required>
<br>
<p style="font-size: 12px; color: #757373;">The maximum number of tokens to generate in the completion. Please refer to the OpenAI website for further information regarding each model's token limit.</p>
<strong><label for="top_p">Top P:</label></strong>
<input type="number" name="top_p" step="0.1" value="1.0" min="0.1" max="1.0" required>
<br>
<p style="font-size: 12px; color: #757373;">Top P (nucleus sampling) controls the diversity of the model's output by limiting the cumulative probability of the most likely tokens. A lower value (e.g., 0.1) makes the output more focused, while a higher value (e.g., 1.0) allows for more diverse outputs.</p>
<strong><label for="frequency_penalty">Frequency Penalty:</label></strong>
<input type="number" name="frequency_penalty" step="0.1" value="0.0" min="-2.0" max="2.0" required>
<br>
<p style="font-size: 12px; color: #757373;">A higher frequency penalty value leads to a more significant reduction in the likelihood of repeated words in the generated text.</p>
<strong><label for="presence_penalty">Presence Penalty:</label></strong>
<input type="number" name="presence_penalty" step="0.1" value="0.0" min="-2.0" max="2.0" required>
<br>
<p style="font-size: 12px; color: #757373;"> A higher presence penalty results in a reduced likelihood of the model incorporating tokens from the prompt into the generated text. </p>
<input type="submit" value="Submit">
<input type="submit" name="clear_session" value="Clear Session">
</form>
{% endif %}
</body>
</html>