Compare commits
No commits in common. "main" and "0.3.0" have entirely different histories.
1
.gitignore
vendored
1
.gitignore
vendored
|
@ -1,6 +1,5 @@
|
||||||
# Sensitive files
|
# Sensitive files
|
||||||
config.py
|
config.py
|
||||||
*.png
|
|
||||||
|
|
||||||
# ---> Python
|
# ---> Python
|
||||||
# Byte-compiled / optimized / DLL files
|
# Byte-compiled / optimized / DLL files
|
||||||
|
|
|
@ -27,7 +27,6 @@ You can manipulate the main abstractions of the OpenAI model:
|
||||||
| --------------------------------------- | ----------------- | --------------- | --------------- | -------------------------------------------------------- |
|
| --------------------------------------- | ----------------- | --------------- | --------------- | -------------------------------------------------------- |
|
||||||
| `cancel_model_response.py` | `response_id` | | | Cancel a Response running in the background |
|
| `cancel_model_response.py` | `response_id` | | | Cancel a Response running in the background |
|
||||||
| `create_conversation.py` | | | | Create a new Conversation |
|
| `create_conversation.py` | | | | Create a new Conversation |
|
||||||
| `create_image.py` | `model_id` | `prompt` | | Create an image based on the prompt description |
|
|
||||||
| `create_model_response.py` | `conversation_id` | `model_id` | `input_message` | Create a Response within a Conversation |
|
| `create_model_response.py` | `conversation_id` | `model_id` | `input_message` | Create a Response within a Conversation |
|
||||||
| `create_text_item.py` | `conversation_id` | `input_message` | | Create a user input or instruction within a Conversation |
|
| `create_text_item.py` | `conversation_id` | `input_message` | | Create a user input or instruction within a Conversation |
|
||||||
| `create_vector_store.py` | | | | Create a Vector Store |
|
| `create_vector_store.py` | | | | Create a Vector Store |
|
||||||
|
|
|
@ -4,8 +4,6 @@
|
||||||
|
|
||||||
| Version | Supported |
|
| Version | Supported |
|
||||||
| ------- | ------------------ |
|
| ------- | ------------------ |
|
||||||
| 0.5.x | :heavy_check_mark: |
|
|
||||||
| 0.4.x | :heavy_check_mark: |
|
|
||||||
| 0.3.x | :heavy_check_mark: |
|
| 0.3.x | :heavy_check_mark: |
|
||||||
| 0.2.x | :heavy_check_mark: |
|
| 0.2.x | :heavy_check_mark: |
|
||||||
| 0.1.x | :heavy_check_mark: |
|
| 0.1.x | :heavy_check_mark: |
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
base_url = "https://api.openai.com/v1/"
|
base_url = "https://api.openai.com/v1/"
|
||||||
secret_key = "YOUR-OPENAI-SECRET-KEY-HERE"
|
secret_key = "YOUR-OPENAI-SECRET-KEY-HERE"
|
||||||
vector_store = ["YOUR-VECTOR-STORE-IDs-HERE","SEPARATED-BY-COMMAS"]
|
vector_store = "YOUR-VECTOR-STORE-ID-HERE"
|
||||||
|
|
||||||
|
|
|
@ -1,33 +0,0 @@
|
||||||
import base64
import json
import os
import sys
from datetime import datetime
from io import BytesIO

import requests
from PIL import Image

from config import *
|
||||||
|
|
||||||
def create_image(model, prompt):
    """Generate an image via the OpenAI images API and save it as a PNG.

    The image is written to ``./output_images/output-<timestamp>.png``.

    Args:
        model: Model identifier to use for generation (e.g. an image model id).
        prompt: Text description of the image to generate.

    Raises:
        requests.HTTPError: if the API responds with a non-2xx status.
        KeyError: if the response JSON lacks the expected ``data[0].b64_json``.
    """
    url = base_url + "images/generations"

    payload = json.dumps({
        "prompt": prompt,
        "model": model
    })
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + secret_key,
    }
    date_time_string = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    # Timeout so a stalled connection cannot hang the script forever.
    response = requests.request("POST", url, headers=headers, data=payload,
                                timeout=120)
    # Fail fast with a clear HTTP error instead of an opaque KeyError
    # when the API rejects the request (bad key, bad model, etc.).
    response.raise_for_status()

    decoded_bytes = base64.b64decode(response.json()["data"][0]["b64_json"])
    byte_stream = BytesIO(decoded_bytes)
    image = Image.open(byte_stream)

    # The original crashed with FileNotFoundError on a fresh checkout
    # because the output directory did not exist yet.
    os.makedirs("./output_images", exist_ok=True)
    out_path = "./output_images/output-" + date_time_string + ".png"
    image.save(out_path, "PNG")

    print("Image " + out_path + " has been generated.")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # CLI entry point: create_image.py <model_id> <prompt>
    # sys.argv elements are already str, so no conversion is needed.
    model_arg = sys.argv[1]
    prompt_arg = sys.argv[2]
    create_image(model_arg, prompt_arg)
|
|
|
@ -10,7 +10,7 @@ def create_model_response(conversation,model,message):
|
||||||
"model": model,
|
"model": model,
|
||||||
"tools": [
|
"tools": [
|
||||||
{"type": "web_search"},
|
{"type": "web_search"},
|
||||||
{"type": "file_search", "vector_store_ids" :vector_store}
|
{"type": "file_search", "vector_store_ids" :[vector_store]}
|
||||||
],
|
],
|
||||||
"input": message,
|
"input": message,
|
||||||
"conversation": conversation
|
"conversation": conversation
|
||||||
|
@ -23,7 +23,5 @@ def create_model_response(conversation,model,message):
|
||||||
response = requests.request("POST", url, headers=headers, data=payload)
|
response = requests.request("POST", url, headers=headers, data=payload)
|
||||||
|
|
||||||
print(response.text)
|
print(response.text)
|
||||||
print(response.json()["output"][-1]["content"][0]["text"])
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
create_model_response(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))
|
create_model_response(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))
|
||||||
|
|
|
@ -13,7 +13,6 @@ def retrieve_item(conversation,message):
|
||||||
response = requests.request("GET", url, headers=headers, data=payload)
|
response = requests.request("GET", url, headers=headers, data=payload)
|
||||||
|
|
||||||
print(response.text)
|
print(response.text)
|
||||||
print(response.json()["content"][0]["text"])
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
|
|
|
@ -13,7 +13,6 @@ def retrieve_model_response(response):
|
||||||
response = requests.request("GET", url, headers=headers, data=payload)
|
response = requests.request("GET", url, headers=headers, data=payload)
|
||||||
|
|
||||||
print(response.text)
|
print(response.text)
|
||||||
print(response.json()["output"][-1]["content"][0]["text"])
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
retrieve_model_response(str(sys.argv[1]))
|
retrieve_model_response(str(sys.argv[1]))
|
||||||
|
|
Loading…
Reference in a new issue