Compare commits
No commits in common. "22dc9e7275484137daefbaa3f3f57c0c1fde10a0" and "70194de64ceaa2666ecc44be7aad5e896a644046" have entirely different histories.
22dc9e7275 ... 70194de64c
.gitignore (vendored, 1 change)

```diff
@@ -1,6 +1,5 @@
 # Sensitive files
 config.py
-*.png
 
 # ---> Python
 # Byte-compiled / optimized / DLL files
```
README.md (61 changes)

```diff
@@ -23,37 +23,34 @@ You can manipulate the main abstractions of the OpenAI model:
 
 * Each script has a number of mandatory parameters in strict order:
 
 | Script | Parameter 1 | Parameter 2 | Parameter 3 | Purpose |
 | --- | --- | --- | --- | --- |
 | `cancel_model_response.py` | `response_id` | | | Cancel a Response running in the background |
 | `create_conversation.py` | | | | Create a new Conversation |
-| `create_image.py` | `model_id` | `prompt` | `image.png` | Create an image based on the prompt description and/or the image provided |
 | `create_model_response.py` | `conversation_id` | `model_id` | `input_message` | Create a Response within a Conversation |
 | `create_text_item.py` | `conversation_id` | `input_message` | | Create a user input or instruction within a Conversation |
 | `create_vector_store.py` | | | | Create a Vector Store |
 | `create_vector_store_file.py` | `vector_store_id` | `file_id` | | Create a File in a Vector Store |
 | `delete_conversation.py` | `conversation_id` | | | Delete a Conversation |
 | `delete_file.py` | `file_id` | | | Delete a File |
 | `delete_item.py` | `conversation_id` | `item_id` | | Delete an Item (input or Response) from a Conversation |
 | `delete_model_response.py` | `response_id` | | | Delete a Response |
 | `delete_vector_store.py` | `vector_store_id` | | | Delete a Vector Store |
 | `delete_vector_store_file.py` | `vector_store_id` | `file_id` | | Delete a File from a Vector Store |
 | `list_files.py` | | | | List all uploaded Files |
 | `list_input_items.py` | `response_id` | | | List all input Items used to generate a Response |
 | `list_items.py` | `conversation_id` | | | List all Items (inputs or Responses) in a Conversation |
 | `list_vector_store_files.py` | `vector_store_id` | | | List all Files in a Vector Store |
 | `list_vector_stores.py` | | | | List all Vector Stores |
-| `modify_vector_store.py` | `vector_store_id` | `metadata` | | Update a Vector Store metadata |
 | `retrieve_conversation.py` | `conversation_id` | | | Retrieve a Conversation |
 | `retrieve_file.py` | `file_id` | | | Retrieve a File |
 | `retrieve_file_content.py` | `file_id` | | | Retrieve the content of File |
 | `retrieve_item.py` | `conversation_id` | `item_id` | | Retrieve an Item from a Conversation |
 | `retrieve_model_response.py` | `response_id` | | | Retrieve a model Response |
 | `retrieve_vector_store.py` | `vector_store_id` | | | Retrieve a Vector Store |
 | `retrieve_vector_store_file.py` | `vector_store_id` | `file_id` | | Retrieve a File from a Vector Store |
 | `retrieve_vector_store_file_content.py` | `vector_store_id` | `file_id` | | Retrieve the content of a File from a Vector Store |
 | `update_conversation.py` | `conversation_id` | `metadata` | | Update a Conversation metadata |
-| `update_vector_store_file.py` | `vector_store_id` | `file_id` | `attributes` | Update a File attributes |
 | `upload_file.py` | `filename` | `path-to-file` | | Upload a File to the OpenAI platform |
 
 * To execute these scripts run `python3 <script.py> <parameter_1> ...`.
```
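Every script in the table above follows the same shape: pull `base_url` and `secret_key` from `config.py`, issue one `requests` call against the OpenAI API, and print the raw response. As a rough illustration only (this is not the repository's exact code; the `retrieve_file` wrapper and the `file_abc123` ID are assumptions), a condensed version of what a one-endpoint script such as `retrieve_file.py` presumably looks like:

```python
# Condensed sketch of the shared script pattern; illustrative, not the repo's code.
import sys
import requests

base_url = "https://api.openai.com/v1/"        # in the repo these come from config.py
secret_key = "YOUR-OPENAI-SECRET-KEY-HERE"     # via `from config import *`

def retrieve_file(file_id):
    # GET {base_url}files/{file_id} with a Bearer token, then print the raw JSON
    url = base_url + "files/" + file_id
    headers = {'Authorization': 'Bearer ' + secret_key}
    response = requests.get(url, headers=headers)
    print(response.text)

if __name__ == '__main__':
    # e.g. `python3 retrieve_file.py file_abc123` (placeholder ID)
    retrieve_file(str(sys.argv[1]))
```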
Supported-versions table (file name not shown in the capture):

```diff
@@ -4,9 +4,6 @@
 
 | Version | Supported          |
 | ------- | ------------------ |
-| 0.5.x   | :heavy_check_mark: |
-| 0.4.x   | :heavy_check_mark: |
-| 0.3.x   | :heavy_check_mark: |
 | 0.2.x   | :heavy_check_mark: |
 | 0.1.x   | :heavy_check_mark: |
 
```
Configuration template (file name not shown in the capture):

```diff
@@ -1,4 +1,4 @@
 base_url = "https://api.openai.com/v1/"
 secret_key = "YOUR-OPENAI-SECRET-KEY-HERE"
-vector_store = ["YOUR-VECTOR-STORE-IDs-HERE","SEPARATED-BY-COMMAS"]
+vector_store = "YOUR-VECTOR-STORE-ID-HERE"
 
```
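The only functional change above is the shape of `vector_store`: the left side holds a list of Vector Store IDs, the right side a single ID string. The `create_model_response.py` hunk later in this diff compensates by wrapping the string in a list, since the `file_search` tool takes `vector_store_ids` as a list. A small sketch with made-up `vs_*` IDs:

```python
# Illustrative only; the vs_* IDs are placeholders.

# Left side of the compare: config already holds a list, passed through as-is
vector_store = ["vs_111", "vs_222"]
tools = [{"type": "web_search"},
         {"type": "file_search", "vector_store_ids": vector_store}]

# Right side: config holds one ID string, so the script wraps it in a list
vector_store = "vs_333"
tools = [{"type": "web_search"},
         {"type": "file_search", "vector_store_ids": [vector_store]}]

print(tools)  # either way, vector_store_ids ends up as a list
```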
create_image.py (entire file removed, 57 lines):

```python
from config import *
import requests
import json
import sys
import base64
from PIL import Image
from io import BytesIO
from datetime import datetime

def create_image(model,prompt,image_path=None):
    # choose endpoint depending on whether an image is provided
    if image_path:
        url = base_url+"images/edits"
    else:
        url = base_url+"images/generations"

    headers = {
        'Authorization': 'Bearer '+secret_key,
    }
    date_time_string = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    if image_path:
        # For edits, use multipart/form-data
        data = {
            "prompt": prompt,
            "model": model
        }
        files = {
            "image": open(image_path, "rb")
        }
        response = requests.post(url, headers=headers, data=data, files=files)
    else:
        # For generations, use JSON
        payload = json.dumps({
            "prompt": prompt,
            "model": model
        })
        headers['Content-Type'] = 'application/json'
        response = requests.post(url, headers=headers, data=payload)

    if response.status_code != 200:
        print(f"Error: {response.status_code} - {response.text}")
        sys.exit(1)

    # handle response data: both generation and edits return b64_json in data[0]
    decoded_bytes = base64.b64decode(response.json()["data"][0]["b64_json"])
    byte_stream = BytesIO(decoded_bytes)
    image = Image.open(byte_stream)
    image.save("./output_images/output-"+date_time_string+".png", "PNG")

    print("Image ./output_images/output-"+date_time_string+".png has been generated.")


if __name__ == '__main__':
    # usage: python create_image.py <model> <prompt> [image_path]
    model = str(sys.argv[1]) if len(sys.argv) > 1 else ''
    prompt = str(sys.argv[2]) if len(sys.argv) > 2 else ''
    image_path = str(sys.argv[3]) if len(sys.argv) > 3 else None
    create_image(model,prompt,image_path)
```
create_model_response.py (2 hunks)

```diff
@@ -10,7 +10,7 @@ def create_model_response(conversation,model,message):
         "model": model,
         "tools": [
             {"type": "web_search"},
-            {"type": "file_search", "vector_store_ids" :vector_store}
+            {"type": "file_search", "vector_store_ids" :[vector_store]}
         ],
         "input": message,
         "conversation": conversation
@@ -23,7 +23,5 @@ def create_model_response(conversation,model,message):
     response = requests.request("POST", url, headers=headers, data=payload)
 
     print(response.text)
-    print(response.json()["output"][-1]["content"][0]["text"])
-
 if __name__ == '__main__':
     create_model_response(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))
```
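The second hunk drops the line that dug the assistant text out of `output[-1].content[0].text`, leaving only the raw `response.text` dump. If that convenience were wanted back, a slightly more defensive extraction might look like the sketch below (the `type == "message"` check and the sample payload are assumptions about the Responses API shape, not code from this repository):

```python
# Hypothetical helper, not part of this repository.
def extract_output_text(response_json):
    # Walk the output items from the end and return the first message text found,
    # instead of assuming output[-1] is always a message (it can be a tool call).
    for item in reversed(response_json.get("output", [])):
        if item.get("type") == "message":
            for part in item.get("content", []):
                if isinstance(part, dict) and "text" in part:
                    return part["text"]
    return None

# Minimal made-up payload shaped like the removed print expected:
sample = {"output": [{"type": "message",
                      "content": [{"type": "output_text", "text": "Hello!"}]}]}
print(extract_output_text(sample))  # -> Hello!
```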
modify_vector_store.py (entire file removed, 22 lines):

```python
from config import *
import requests
import json
import sys

def modify_vector_store(vector_store_id,metadata):
    url = base_url+"vector_stores/"+vector_store_id

    payload = json.dumps({
        "metadata": json.loads(metadata)
    })
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer '+secret_key,
    }

    response = requests.request("POST", url, headers=headers, data=payload)

    print(response.text)

if __name__ == '__main__':
    modify_vector_store(str(sys.argv[1]),str(sys.argv[2]))
```
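Note that this script (like `update_vector_store_file.py` further down) runs `json.loads()` on its final argument, so the metadata or attributes must arrive as one JSON-encoded string on the command line. A hedged sketch of that step, with made-up ID, keys, and values:

```python
import json

# The scripts expect the last CLI argument to be a single JSON object string,
# e.g. python3 modify_vector_store.py vs_123 '{"project": "demo"}'
# (the ID and keys here are placeholders for illustration).
metadata_arg = '{"project": "demo", "owner": "docs-team"}'
payload = json.dumps({"metadata": json.loads(metadata_arg)})
print(payload)  # what the script would POST to the vector_stores endpoint
```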
retrieve_item.py (1 hunk)

```diff
@@ -13,7 +13,6 @@ def retrieve_item(conversation,message):
     response = requests.request("GET", url, headers=headers, data=payload)
 
     print(response.text)
-    print(response.json()["content"][0]["text"])
 
 
 if __name__ == '__main__':
```
retrieve_model_response.py (1 hunk)

```diff
@@ -13,7 +13,6 @@ def retrieve_model_response(response):
     response = requests.request("GET", url, headers=headers, data=payload)
 
     print(response.text)
-    print(response.json()["output"][-1]["content"][0]["text"])
 
 if __name__ == '__main__':
     retrieve_model_response(str(sys.argv[1]))
```
update_vector_store_file.py (entire file removed, 22 lines):

```python
from config import *
import requests
import json
import sys

def update_vector_store_file(vector_store_id,file_id,attributes):
    url = base_url+"vector_stores/"+vector_store_id+"/files/"+file_id

    payload = json.dumps({
        "attributes": json.loads(attributes)
    })
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer '+secret_key,
    }

    response = requests.request("POST", url, headers=headers, data=payload)

    print(response.text)

if __name__ == '__main__':
    update_vector_store_file(str(sys.argv[1]),str(sys.argv[2]),str(sys.argv[3]))
```