Commit

Merge branch 'main' of https://github.com/Kav-K/GPT3Discord
Kav-K committed Jan 5, 2023
2 parents 79efb5f + 20bd3a9 commit aa1a82b
Showing 5 changed files with 91 additions and 16 deletions.
README.md (2 changes: 1 addition & 1 deletion)
@@ -46,7 +46,7 @@ These commands are grouped, so each group has a prefix but you can easily tab co

 `/help` - Display help text for the bot

-`/gpt ask <prompt>` Ask the GPT3 Davinci 003 model a question.
+`/gpt ask <prompt> <temp> <top_p> <frequency penalty> <presence penalty>` Ask the GPT3 Davinci 003 model a question. Optional overrides available.

 `/gpt converse` - Start a conversation with the bot, like ChatGPT
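For example, a hypothetical invocation that overrides only the sampling temperature (all four overrides are optional and may be omitted) might look like `/gpt ask prompt: Write a haiku about robots temperature: 0.9`.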
cogs/draw_image_generation.py (2 changes: 1 addition & 1 deletion)
@@ -47,7 +47,7 @@ async def encapsulated_send(

         try:
             file, image_urls = await self.model.send_image_request(
-                prompt, vary=vary if not draw_from_optimizer else None
+                ctx, prompt, vary=vary if not draw_from_optimizer else None
             )
         except ValueError as e:
             (
cogs/gpt_3_commands_and_converser.py (73 changes: 69 additions & 4 deletions)
@@ -574,7 +574,16 @@ async def on_message(self, message):

     # ctx can be of type AppContext(interaction) or Message
     async def encapsulated_send(
-        self, user_id, prompt, ctx, response_message=None, from_g_command=False
+        self,
+        user_id,
+        prompt,
+        ctx,
+        temp_override=None,
+        top_p_override=None,
+        frequency_penalty_override=None,
+        presence_penalty_override=None,
+        response_message=None,
+        from_g_command=False,
     ):
         new_prompt = prompt + "\nGPTie: " if not from_g_command else prompt
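Because every new override parameter defaults to None, callers can supply any subset by keyword; a hypothetical call from elsewhere in the cog that pins only top_p might look like:

    # Hypothetical caller: override only top_p; the remaining overrides stay
    # None and fall back to the model's configured defaults downstream.
    await self.encapsulated_send(
        user_id,
        prompt,
        ctx,
        top_p_override=0.1,
    )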

@@ -635,7 +644,14 @@ async def encapsulated_send(
             return

         # Send the request to the model
-        response = await self.model.send_request(new_prompt, tokens=tokens)
+        response = await self.model.send_request(
+            new_prompt,
+            tokens=tokens,
+            temp_override=temp_override,
+            top_p_override=top_p_override,
+            frequency_penalty_override=frequency_penalty_override,
+            presence_penalty_override=presence_penalty_override,
+        )

         # Clean the request response
         response_text = str(response["choices"][0]["text"])
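For reference, a minimal sketch of the completions response shape that `response["choices"][0]["text"]` indexes into; the values here are invented for illustration:

    # Illustrative /v1/completions response (values made up for the example):
    response = {
        "choices": [
            {"text": "\nGPTie: Hello there!", "index": 0, "finish_reason": "stop"}
        ],
        "usage": {"prompt_tokens": 12, "completion_tokens": 6, "total_tokens": 18},
    }
    response_text = str(response["choices"][0]["text"])  # "\nGPTie: Hello there!"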
@@ -736,8 +752,48 @@ async def encapsulated_send(
     @discord.option(
         name="prompt", description="The prompt to send to GPT3", required=True
     )
+    @discord.option(
+        name="temperature",
+        description="Higher values mean the model will take more risks",
+        required=False,
+        input_type=float,
+        min_value=0,
+        max_value=1,
+    )
+    @discord.option(
+        name="top_p",
+        description="1 is greedy sampling, 0.1 means only considering the top 10% of the probability distribution",
+        required=False,
+        input_type=float,
+        min_value=0,
+        max_value=1,
+    )
+    @discord.option(
+        name="frequency_penalty",
+        description="Decreases the model's likelihood of repeating the same line verbatim",
+        required=False,
+        input_type=float,
+        min_value=-2,
+        max_value=2,
+    )
+    @discord.option(
+        name="presence_penalty",
+        description="Increases the model's likelihood of talking about new topics",
+        required=False,
+        input_type=float,
+        min_value=-2,
+        max_value=2,
+    )
     @discord.guild_only()
-    async def ask(self, ctx: discord.ApplicationContext, prompt: str):
+    async def ask(
+        self,
+        ctx: discord.ApplicationContext,
+        prompt: str,
+        temperature: float,
+        top_p: float,
+        frequency_penalty: float,
+        presence_penalty: float,
+    ):
         await ctx.defer()

         user = ctx.user
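A minimal, self-contained sketch of the decorator pattern above, assuming py-cord (where an option declared with required=False and no default arrives as None when the user omits it), which is what lets the command forward the overrides unchanged:

    import discord

    bot = discord.Bot()

    @bot.slash_command(name="echo_temp", description="Echo an optional temperature override")
    @discord.option(
        name="temperature",
        description="Sampling temperature override",
        required=False,
        input_type=float,
        min_value=0,
        max_value=1,
    )
    async def echo_temp(ctx: discord.ApplicationContext, temperature: float):
        # temperature is None unless the user filled the option in.
        await ctx.respond(f"temperature override: {temperature}")

    bot.run("YOUR_BOT_TOKEN")  # placeholder token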
@@ -747,7 +803,16 @@ async def ask(self, ctx: discord.ApplicationContext, prompt: str):
         # Send the request to the model
         # If conversing, the prompt to send is the history, otherwise, it's just the prompt

-        await self.encapsulated_send(user.id, prompt, ctx, from_g_command=True)
+        await self.encapsulated_send(
+            user.id,
+            prompt,
+            ctx,
+            temp_override=temperature,
+            top_p_override=top_p,
+            frequency_penalty_override=frequency_penalty,
+            presence_penalty_override=presence_penalty,
+            from_g_command=True,
+        )

     @add_to_group("gpt")
     @discord.slash_command(
cogs/image_prompt_optimizer.py (2 changes: 1 addition & 1 deletion)
@@ -183,7 +183,7 @@ async def callback(self, interaction: discord.Interaction):
         await self.image_service_cog.encapsulated_send(
             user_id,
             prompt,
-            None,
+            interaction,
             msg,
             True,
             True,
models/openai_model.py (28 changes: 19 additions & 9 deletions)
@@ -381,21 +381,24 @@ async def send_request(
             )

         print("The prompt about to be sent is " + prompt)
+        print(
+            f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}"
+        )

         async with aiohttp.ClientSession() as session:
             payload = {
                 "model": self.model,
                 "prompt": prompt,
-                "temperature": self.temp if not temp_override else temp_override,
-                "top_p": self.top_p if not top_p_override else top_p_override,
+                "temperature": self.temp if temp_override is None else temp_override,
+                "top_p": self.top_p if top_p_override is None else top_p_override,
                 "max_tokens": self.max_tokens - tokens
                 if not max_tokens_override
                 else max_tokens_override,
                 "presence_penalty": self.presence_penalty
-                if not presence_penalty_override
+                if presence_penalty_override is None
                 else presence_penalty_override,
                 "frequency_penalty": self.frequency_penalty
-                if not frequency_penalty_override
+                if frequency_penalty_override is None
                 else frequency_penalty_override,
                 "best_of": self.best_of if not best_of_override else best_of_override,
             }
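The switch from truthiness checks to `is None` in the payload above matters because an explicit override of 0 is falsy; a minimal sketch (the default value is hypothetical):

    default_temp = 0.7   # hypothetical configured default
    temp_override = 0.0  # user explicitly asked for deterministic sampling

    # Old truthiness check: 0.0 is falsy, so the override was silently lost.
    assert (default_temp if not temp_override else temp_override) == 0.7

    # New sentinel check: only a missing (None) override falls back to the default.
    assert (default_temp if temp_override is None else temp_override) == 0.0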
@@ -404,13 +407,16 @@
                 "https://api.openai.com/v1/completions", json=payload, headers=headers
             ) as resp:
                 response = await resp.json()
-                print(response)
+                print(f"Payload -> {payload}")
+                print(f"Response -> {response}")
                 # Parse the total tokens used for this request and response pair from the response
                 await self.valid_text_request(response)

                 return response

-    async def send_image_request(self, prompt, vary=None) -> tuple[File, list[Any]]:
+    async def send_image_request(
+        self, ctx, prompt, vary=None
+    ) -> tuple[File, list[Any]]:
         # Validate that all the parameters are in a good state before we send the request
         words = len(prompt.split(" "))
         if words < 3 or words > 75:
@@ -533,17 +539,21 @@ async def send_image_request(self, prompt, vary=None) -> tuple[File, list[Any]]:
             )

         # Print the filesize of new_im, in mega bytes
-        image_size = os.path.getsize(temp_file.name) / 1000000
+        image_size = os.path.getsize(temp_file.name) / 1048576
+        if ctx.guild is None:
+            guild_file_limit = 8
+        else:
+            guild_file_limit = ctx.guild.filesize_limit / 1048576

         # If the image size is greater than 8MB, we can't return this to the user, so we will need to downscale the
         # image and try again
         safety_counter = 0
-        while image_size > 8:
+        while image_size > guild_file_limit:
             safety_counter += 1
             if safety_counter >= 3:
                 break
             print(
-                f"Image size is {image_size}MB, which is too large for discord. Downscaling and trying again"
+                f"Image size is {image_size}MB, which is too large for this server's {guild_file_limit}MB limit. Downscaling and trying again"
             )
             # We want to do this resizing asynchronously, so that it doesn't block the main thread during the resize.
             # We can use the asyncio.run_in_executor method to do this
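A minimal sketch of the executor pattern those comments describe, assuming a hypothetical Pillow-based helper rather than the bot's actual resize routine:

    import asyncio
    from functools import partial

    from PIL import Image

    def downscale(path: str, factor: float = 0.9) -> None:
        # Blocking PIL work: shrink the image in place by `factor`.
        with Image.open(path) as im:
            resized = im.resize((int(im.width * factor), int(im.height * factor)))
            resized.save(path)

    async def downscale_async(path: str) -> None:
        # Run the blocking resize on the default thread pool so the event
        # loop keeps servicing Discord events while the image shrinks.
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, partial(downscale, path))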
