From 9e4460c009e8bae59c118435d7f15a19bcae0fef Mon Sep 17 00:00:00 2001
From: Hikari Haru
Date: Tue, 3 Jan 2023 17:27:47 +0100
Subject: [PATCH 1/8] Added basic passthrough to /ask

Update readme
---
 README.md                            |  2 +-
 cogs/gpt_3_commands_and_converser.py | 20 ++++++++++++++++----
 models/openai_model.py               |  1 +
 3 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index b4b1d3c6..1f08f018 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ These commands are grouped, so each group has a prefix but you can easily tab co
 
 `/help` - Display help text for the bot
 
-`/gpt ask <prompt>` Ask the GPT3 Davinci 003 model a question.
+`/gpt ask <prompt>` Ask the GPT3 Davinci 003 model a question. Optional overrides available
 
 `/gpt converse` - Start a conversation with the bot, like ChatGPT
 
diff --git a/cogs/gpt_3_commands_and_converser.py b/cogs/gpt_3_commands_and_converser.py
index e7e99571..8dff2e38 100644
--- a/cogs/gpt_3_commands_and_converser.py
+++ b/cogs/gpt_3_commands_and_converser.py
@@ -558,7 +558,7 @@ async def on_message(self, message):
 
     # ctx can be of type AppContext(interaction) or Message
     async def encapsulated_send(
-        self, user_id, prompt, ctx, response_message=None, from_g_command=False
+        self, user_id, prompt, ctx, temp_override=None, top_p_override=None, frequency_penalty_override=None, presence_penalty_override=None, response_message=None, from_g_command=False
     ):
         new_prompt = prompt + "\nGPTie: " if not from_g_command else prompt
 
@@ -619,7 +619,7 @@ async def encapsulated_send(
             return
 
         # Send the request to the model
-        response = await self.model.send_request(new_prompt, tokens=tokens)
+        response = await self.model.send_request(new_prompt, tokens=tokens, temp_override=temp_override, top_p_override=top_p_override, frequency_penalty_override=frequency_penalty_override, presence_penalty_override=presence_penalty_override)
 
         # Clean the request response
         response_text = str(response["choices"][0]["text"])
@@ -712,8 +712,20 @@ async def encapsulated_send(
     @discord.option(
         name="prompt", description="The prompt to send to GPT3", required=True
     )
+    @discord.option(
+        name="temperature", description="Higher values means the model will take more risks.", required=False, input_type=float, min_value=0, max_value=1
+    )
+    @discord.option(
+        name="top_p", description="Higher values means the model will take more risks.", required=False, input_type=float, min_value=0, max_value=1
+    )
+    @discord.option(
+        name="frequency_penalty", description=" Decreasing the model's likelihood to repeat the same line verbatim.", required=False, input_type=float, min_value=-2, max_value=2
+    )
+    @discord.option(
+        name="presence_penalty", description=" Increasing the model's likelihood to talk about new topics.", required=False, input_type=float, min_value=-2, max_value=2
+    )
     @discord.guild_only()
-    async def ask(self, ctx: discord.ApplicationContext, prompt: str):
+    async def ask(self, ctx: discord.ApplicationContext, prompt: str, temperature: float, top_p:float, frequency_penalty: float, presence_penalty: float):
         await ctx.defer()
 
         user = ctx.user
@@ -723,7 +735,7 @@ async def ask(self, ctx: discord.ApplicationContext, prompt: str):
 
         # Send the request to the model
         # If conversing, the prompt to send is the history, otherwise, it's just the prompt
-        await self.encapsulated_send(user.id, prompt, ctx, from_g_command=True)
+        await self.encapsulated_send(user.id, prompt, ctx, temp_override=temperature, top_p_override=top_p, frequency_penalty_override=frequency_penalty, presence_penalty_override=presence_penalty, from_g_command=True)
 
     @add_to_group("gpt")
     @discord.slash_command(
diff --git a/models/openai_model.py b/models/openai_model.py
index 6a6d8505..f6e85bcf 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -374,6 +374,7 @@ async def send_request(
         )
 
         print("The prompt about to be sent is " + prompt)
+        print(f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}")
 
         async with aiohttp.ClientSession() as session:
             payload = {
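
A minimal sketch of the passthrough this patch introduces, with hypothetical stripped-down names rather than the cog's real signatures: each layer declares the sampling parameters as optional keyword arguments defaulting to None and forwards them untouched, so only the model layer ever applies the configured defaults.

# Sketch only: hypothetical names, far fewer parameters than the real code.
async def ask(ctx, prompt, temperature=None, top_p=None):
    # Command layer: receives None for any option the user omitted.
    return await encapsulated_send(ctx, prompt, temp_override=temperature, top_p_override=top_p)

async def encapsulated_send(ctx, prompt, temp_override=None, top_p_override=None):
    # Middle layer: forward the overrides without interpreting them.
    return await send_request(prompt, temp_override=temp_override, top_p_override=top_p_override)

async def send_request(prompt, temp_override=None, top_p_override=None):
    # Model layer: resolve overrides against defaults. At this point in the
    # series the check is still truthiness-based; the next patch tightens it.
    temperature = 0.6 if not temp_override else temp_override
    top_p = 0.9 if not top_p_override else top_p_override
    return {"prompt": prompt, "temperature": temperature, "top_p": top_p}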
From 8fbfe952f8bd9882cf501df0ffdb56c609ab5ff0 Mon Sep 17 00:00:00 2001
From: Rene Teigen
Date: Tue, 3 Jan 2023 20:03:01 +0000
Subject: [PATCH 2/8] Changed how the overrides are checked to allow "0" input

---
 models/openai_model.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/models/openai_model.py b/models/openai_model.py
index f6e85bcf..96d2d6e3 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -380,16 +380,16 @@ async def send_request(
             payload = {
                 "model": self.model,
                 "prompt": prompt,
-                "temperature": self.temp if not temp_override else temp_override,
-                "top_p": self.top_p if not top_p_override else top_p_override,
+                "temperature": self.temp if temp_override is None else temp_override,
+                "top_p": self.top_p if top_p_override is None else top_p_override,
                 "max_tokens": self.max_tokens - tokens
                 if not max_tokens_override
                 else max_tokens_override,
                 "presence_penalty": self.presence_penalty
-                if not presence_penalty_override
+                if presence_penalty_override is None
                 else presence_penalty_override,
                 "frequency_penalty": self.frequency_penalty
-                if not frequency_penalty_override
+                if frequency_penalty_override is None
                 else frequency_penalty_override,
                 "best_of": self.best_of if not best_of_override else best_of_override,
             }
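
Why the truthiness check had to go, as a self-contained sketch (hypothetical resolve helpers, not code from the repository): "not override" cannot distinguish an omitted argument from an explicit 0 or 0.0, so user-supplied zeros were silently replaced by the defaults until the comparison became an identity test against None.

def resolve_falsy(default, override):
    # Pre-patch behaviour: 0, 0.0 and None all count as "not set".
    return default if not override else override

def resolve_is_none(default, override):
    # Patched behaviour: only a genuinely absent value falls back.
    return default if override is None else override

print(resolve_falsy(0.6, 0.0))     # 0.6 -- the explicit 0.0 is lost
print(resolve_is_none(0.6, 0.0))   # 0.0 -- the explicit 0.0 survives
print(resolve_is_none(0.6, None))  # 0.6 -- an omitted value still falls back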
name="presence_penalty", description=" Increasing the model's likelihood to talk about new topics.", required=False, input_type=float, min_value=-2, max_value=2 + name="presence_penalty", description="Increasing the model's likelihood to talk about new topics", required=False, input_type=float, min_value=-2, max_value=2 ) @discord.guild_only() async def ask(self, ctx: discord.ApplicationContext, prompt: str, temperature: float, top_p:float, frequency_penalty: float, presence_penalty: float): diff --git a/models/openai_model.py b/models/openai_model.py index 96d2d6e3..5c430ca1 100644 --- a/models/openai_model.py +++ b/models/openai_model.py @@ -398,7 +398,8 @@ async def send_request( "https://api.openai.com/v1/completions", json=payload, headers=headers ) as resp: response = await resp.json() - print(response) + print(f"Payload -> {payload}") + print(f"Response -> {response}") # Parse the total tokens used for this request and response pair from the response tokens_used = int(response["usage"]["total_tokens"]) self.usage_service.update_usage(tokens_used) From 68a20dcc507cf04e10b682f41bc91479a236a095 Mon Sep 17 00:00:00 2001 From: Rene Teigen Date: Thu, 5 Jan 2023 20:18:32 +0000 Subject: [PATCH 4/8] Changed the image resizing to go by the guild filesize limit Since boosted servers have higher limits, even for bots --- cogs/draw_image_generation.py | 2 +- models/openai_model.py | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cogs/draw_image_generation.py b/cogs/draw_image_generation.py index bb7fb119..7cc80a5f 100644 --- a/cogs/draw_image_generation.py +++ b/cogs/draw_image_generation.py @@ -47,7 +47,7 @@ async def encapsulated_send( try: file, image_urls = await self.model.send_image_request( - prompt, vary=vary if not draw_from_optimizer else None + ctx, prompt, vary=vary if not draw_from_optimizer else None ) except ValueError as e: ( diff --git a/models/openai_model.py b/models/openai_model.py index 71ffa5a2..f5bf94b7 100644 --- a/models/openai_model.py +++ b/models/openai_model.py @@ -410,7 +410,7 @@ async def send_request( return response - async def send_image_request(self, prompt, vary=None) -> tuple[File, list[Any]]: + async def send_image_request(self, ctx, prompt, vary=None) -> tuple[File, list[Any]]: # Validate that all the parameters are in a good state before we send the request words = len(prompt.split(" ")) if words < 3 or words > 75: @@ -533,17 +533,18 @@ async def send_image_request(self, prompt, vary=None) -> tuple[File, list[Any]]: ) # Print the filesize of new_im, in mega bytes - image_size = os.path.getsize(temp_file.name) / 1000000 + image_size = os.path.getsize(temp_file.name) / 1048576 + guild_file_limit = ctx.guild.filesize_limit / 1048576 # If the image size is greater than 8MB, we can't return this to the user, so we will need to downscale the # image and try again safety_counter = 0 - while image_size > 8: + while image_size > guild_file_limit: safety_counter += 1 if safety_counter >= 3: break print( - f"Image size is {image_size}MB, which is too large for discord. Downscaling and trying again" + f"Image size is {image_size}MB, which is too large for this server {guild_file_limit}MB. Downscaling and trying again" ) # We want to do this resizing asynchronously, so that it doesn't block the main thread during the resize. 
From 0835d849fc4c3bbaca7152e34257772531163eb4 Mon Sep 17 00:00:00 2001
From: Rene Teigen
Date: Thu, 5 Jan 2023 21:41:05 +0000
Subject: [PATCH 5/8] Add fallback if guild is None

---
 models/openai_model.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/models/openai_model.py b/models/openai_model.py
index f5bf94b7..5e736543 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -534,7 +534,10 @@ async def send_image_request(self, ctx, prompt, vary=None) -> tuple[File, list[A
 
         # Print the filesize of new_im, in mega bytes
         image_size = os.path.getsize(temp_file.name) / 1048576
-        guild_file_limit = ctx.guild.filesize_limit / 1048576
+        if ctx.guild is None:
+            guild_file_limit = 8
+        else:
+            guild_file_limit = ctx.guild.filesize_limit / 1048576
 
         # If the image size is greater than 8MB, we can't return this to the user, so we will need to downscale the
         # image and try again

From e0150c526dd4a99e09ba1fb49178ab08411d6946 Mon Sep 17 00:00:00 2001
From: Rene Teigen
Date: Thu, 5 Jan 2023 21:51:51 +0000
Subject: [PATCH 6/8] Fix ctx passthrough with draw button

---
 cogs/image_prompt_optimizer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cogs/image_prompt_optimizer.py b/cogs/image_prompt_optimizer.py
index 281b6632..c136e713 100644
--- a/cogs/image_prompt_optimizer.py
+++ b/cogs/image_prompt_optimizer.py
@@ -183,7 +183,7 @@ async def callback(self, interaction: discord.Interaction):
         await self.image_service_cog.encapsulated_send(
             user_id,
             prompt,
-            None,
+            interaction,
             msg,
             True,
             True,
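
PATCH 5 exists because ctx.guild is None for interactions outside a guild (direct messages), where reading filesize_limit would raise AttributeError; 8 MB is Discord's baseline attachment cap for unboosted contexts. Reduced to its core, under those assumptions:

def effective_upload_limit_mb(ctx):
    # No guild (DM): assume Discord's default 8 MB attachment limit.
    if ctx.guild is None:
        return 8
    # Guild contexts may have a boosted, higher limit, reported in bytes.
    return ctx.guild.filesize_limit / 1048576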
send to GPT3", required=True ) @discord.option( - name="temperature", description="Higher values means the model will take more risks", required=False, input_type=float, min_value=0, max_value=1 + name="temperature", + description="Higher values means the model will take more risks", + required=False, + input_type=float, + min_value=0, + max_value=1, ) @discord.option( - name="top_p", description="1 is greedy sampling, 0.1 means only considering the top 10% of probability distribution", required=False, input_type=float, min_value=0, max_value=1 + name="top_p", + description="1 is greedy sampling, 0.1 means only considering the top 10% of probability distribution", + required=False, + input_type=float, + min_value=0, + max_value=1, ) @discord.option( - name="frequency_penalty", description="Decreasing the model's likelihood to repeat the same line verbatim", required=False, input_type=float, min_value=-2, max_value=2 + name="frequency_penalty", + description="Decreasing the model's likelihood to repeat the same line verbatim", + required=False, + input_type=float, + min_value=-2, + max_value=2, ) @discord.option( - name="presence_penalty", description="Increasing the model's likelihood to talk about new topics", required=False, input_type=float, min_value=-2, max_value=2 + name="presence_penalty", + description="Increasing the model's likelihood to talk about new topics", + required=False, + input_type=float, + min_value=-2, + max_value=2, ) @discord.guild_only() - async def ask(self, ctx: discord.ApplicationContext, prompt: str, temperature: float, top_p:float, frequency_penalty: float, presence_penalty: float): + async def ask( + self, + ctx: discord.ApplicationContext, + prompt: str, + temperature: float, + top_p: float, + frequency_penalty: float, + presence_penalty: float, + ): await ctx.defer() user = ctx.user @@ -759,7 +803,16 @@ async def ask(self, ctx: discord.ApplicationContext, prompt: str, temperature: f # Send the request to the model # If conversing, the prompt to send is the history, otherwise, it's just the prompt - await self.encapsulated_send(user.id, prompt, ctx, temp_override=temperature, top_p_override=top_p, frequency_penalty_override=frequency_penalty, presence_penalty_override=presence_penalty, from_g_command=True) + await self.encapsulated_send( + user.id, + prompt, + ctx, + temp_override=temperature, + top_p_override=top_p, + frequency_penalty_override=frequency_penalty, + presence_penalty_override=presence_penalty, + from_g_command=True, + ) @add_to_group("gpt") @discord.slash_command( diff --git a/models/openai_model.py b/models/openai_model.py index 020de9a3..636c24d3 100644 --- a/models/openai_model.py +++ b/models/openai_model.py @@ -381,7 +381,9 @@ async def send_request( ) print("The prompt about to be sent is " + prompt) - print(f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}") + print( + f"Overrides -> temp:{temp_override}, top_p:{top_p_override} frequency:{frequency_penalty_override}, presence:{presence_penalty_override}" + ) async with aiohttp.ClientSession() as session: payload = { From 20bd3a970c3a6eafb36acf3ff49ca33d349b4842 Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Thu, 5 Jan 2023 22:07:53 +0000 Subject: [PATCH 8/8] Format Python code with psf/black push --- models/openai_model.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/models/openai_model.py b/models/openai_model.py index 
From 20bd3a970c3a6eafb36acf3ff49ca33d349b4842 Mon Sep 17 00:00:00 2001
From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Date: Thu, 5 Jan 2023 22:07:53 +0000
Subject: [PATCH 8/8] Format Python code with psf/black push

---
 models/openai_model.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/models/openai_model.py b/models/openai_model.py
index a0ee481f..2d3a27e4 100644
--- a/models/openai_model.py
+++ b/models/openai_model.py
@@ -414,7 +414,9 @@ async def send_request(
 
         return response
 
-    async def send_image_request(self, ctx, prompt, vary=None) -> tuple[File, list[Any]]:
+    async def send_image_request(
+        self, ctx, prompt, vary=None
+    ) -> tuple[File, list[Any]]:
         # Validate that all the parameters are in a good state before we send the request
         words = len(prompt.split(" "))
         if words < 3 or words > 75:
@@ -539,7 +541,7 @@ async def send_image_request(self, ctx, prompt, vary=None) -> tuple[File, list[A
 
         # Print the filesize of new_im, in mega bytes
         image_size = os.path.getsize(temp_file.name) / 1048576
         if ctx.guild is None:
-            guild_file_limit = 8
+            guild_file_limit = 8
         else:
             guild_file_limit = ctx.guild.filesize_limit / 1048576