Compare commits

...

6 commits
0.3.0 ... main

9 changed files with 43 additions and 2 deletions

1
.gitignore vendored
View file

@@ -1,5 +1,6 @@
# Sensitive files
config.py
*.png
# ---> Python
# Byte-compiled / optimized / DLL files

View file

@@ -27,6 +27,7 @@ You can manipulate the main abstractions of the OpenAI model:
| --------------------------------------- | ----------------- | --------------- | --------------- | -------------------------------------------------------- |
| `cancel_model_response.py` | `response_id` | | | Cancel a Response running in the background |
| `create_conversation.py` | | | | Create a new Conversation |
| `create_image.py` | `model_id` | `prompt` | | Create an image based on the prompt description |
| `create_model_response.py` | `conversation_id` | `model_id` | `input_message` | Create a Response within a Conversation |
| `create_text_item.py` | `conversation_id` | `input_message` | | Create a user input or instruction within a Conversation |
| `create_vector_store.py` | | | | Create a Vector Store |

View file

@@ -4,6 +4,8 @@
| Version | Supported |
| ------- | ------------------ |
| 0.5.x | :heavy_check_mark: |
| 0.4.x | :heavy_check_mark: |
| 0.3.x | :heavy_check_mark: |
| 0.2.x | :heavy_check_mark: |
| 0.1.x | :heavy_check_mark: |

View file

@@ -1,4 +1,4 @@
base_url = "https://api.openai.com/v1/"
secret_key = "YOUR-OPENAI-SECRET-KEY-HERE"
vector_store = "YOUR-VECTOR-STORE-ID-HERE"
vector_store = ["YOUR-VECTOR-STORE-IDs-HERE","SEPARATED-BY-COMMAS"]

33
create_image.py Normal file
View file

@@ -0,0 +1,33 @@
from config import *
import requests
import json
import sys
import os
import base64
from PIL import Image
from io import BytesIO
from datetime import datetime


def create_image(model, prompt):
    """Generate an image via the OpenAI Images API and save it as a PNG.

    Saves the result to ./output_images/output-<timestamp>.png, creating
    the directory if it does not exist.

    Parameters:
        model:  model identifier to use for generation.
        prompt: text description of the desired image.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        KeyError: if the response JSON lacks the expected b64 image data.
    """
    url = base_url + "images/generations"
    payload = json.dumps({
        "prompt": prompt,
        "model": model,
    })
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + secret_key,
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    # Fail loudly on an HTTP error instead of surfacing it later as an
    # opaque KeyError on the missing "data" field.
    response.raise_for_status()
    decoded_bytes = base64.b64decode(response.json()["data"][0]["b64_json"])
    image = Image.open(BytesIO(decoded_bytes))
    # The original crashed on first run if the output directory was missing.
    os.makedirs("./output_images", exist_ok=True)
    date_time_string = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    out_path = "./output_images/output-" + date_time_string + ".png"
    image.save(out_path, "PNG")
    print("Image " + out_path + " has been generated.")


if __name__ == '__main__':
    # Guard against missing CLI arguments with a usage message instead of
    # an IndexError traceback.
    if len(sys.argv) < 3:
        sys.exit("Usage: create_image.py <model_id> <prompt>")
    create_image(str(sys.argv[1]), str(sys.argv[2]))

View file

@@ -10,7 +10,7 @@ def create_model_response(conversation,model,message):
"model": model,
"tools": [
{"type": "web_search"},
{"type": "file_search", "vector_store_ids" :[vector_store]}
{"type": "file_search", "vector_store_ids" :vector_store}
],
"input": message,
"conversation": conversation
@@ -23,5 +23,7 @@ def create_model_response(conversation,model,message):
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
print(response.json()["output"][-1]["content"][0]["text"])
if __name__ == '__main__':
create_model_response(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))

View file

View file

@@ -13,6 +13,7 @@ def retrieve_item(conversation,message):
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
print(response.json()["content"][0]["text"])
if __name__ == '__main__':

View file

@@ -13,6 +13,7 @@ def retrieve_model_response(response):
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
print(response.json()["output"][-1]["content"][0]["text"])
if __name__ == '__main__':
retrieve_model_response(str(sys.argv[1]))