[WIP] v2 #1005

Open · wants to merge 10 commits into main
docs/.vitepress/config.mts (9 changes: 8 additions & 1 deletion)

```diff
@@ -89,7 +89,14 @@ export default withPwa(defineConfig({
     },
     nav: [
       { text: 'Guide', link: '/get_started/intro' },
-      { text: 'Use cases', link: '/use_cases/intro' }
+      { text: 'Use cases', link: '/use_cases/intro' },
+      {
+        text: 'Version',
+        items: [
+          { text: 'v1.18.4', link: 'https://v1.promptulate.cn' },
+          { text: 'v2', link: '/' }
+        ]
+      }
     ],
     outline: {
       level: [2, 3],
```
docs/CNAME (2 changes: 1 addition & 1 deletion)

```diff
@@ -1 +1 @@
-www.promptulate.cn
+promptulate.cn
```
docs/index.md (2 changes: 1 addition & 1 deletion)

```diff
@@ -3,7 +3,7 @@
 layout: home

 hero:
-  name: "Promptulate"
+  name: "Promptulate v2"
   text: "🚀Lightweight AI Native development framework."
   tagline: Build your LLM Agent Application in a pythonic way.
   image:
```
docs/other/log_system.md (3 changes: 3 additions & 0 deletions)

```diff
@@ -1,8 +1,11 @@
 # Log System

 Promptulate has a built-in log system, which is used to record the running process of the program.
+
+When you run promptulate components, all the logs are stored in a log folder. Promptulate divides the logs by date, so each day has a separate log file.
+
 You can find the logs in the following paths:

 - Windows: /Users/username/.zeeland/pne/logs
 - Linux: /home/username/.zeeland/pne/logs
 - macOS: /Users/username/.zeeland/pne/logs
```
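Note: since the logs are split by date, finding the current day's file is mechanical. A minimal sketch, assuming the `~/.zeeland/pne/logs` location documented above; the exact date-stamped file-name format is an assumption, so the sketch just globs for today's date:

```python
import datetime
from pathlib import Path

# Assumed location from the doc above: one file per day under ~/.zeeland/pne/logs.
log_dir = Path.home() / ".zeeland" / "pne" / "logs"
today = datetime.date.today().isoformat()  # e.g. "2024-05-01"

# The file-name format is an assumption; match any file containing today's date.
candidates = sorted(log_dir.glob(f"*{today}*"))
if candidates:
    print(candidates[0].read_text(encoding="utf-8"))
else:
    print(f"No log file for {today} in {log_dir}")
```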
poetry.lock (2,351 changes: 1,281 additions & 1,070 deletions)

Large diffs are not rendered by default.
promptulate/chat.py (2 changes: 1 addition & 1 deletion)

```diff
@@ -205,7 +205,7 @@ def chat(
     model: Optional[str] = None,
     model_config: Optional[dict] = None,
     tools: Optional[List[ToolTypes]] = None,
-    output_schema: Optional[type(BaseModel)] = None,
+    output_schema: Optional[Type[BaseModel]] = None,
     examples: Optional[List[BaseModel]] = None,
    return_raw_response: bool = False,
     custom_llm: Optional[BaseLLM] = None,
```
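Note: this small change is a genuine fix, not a style tweak. `type(BaseModel)` is a runtime call that evaluates to pydantic's metaclass (`ModelMetaclass`), which is not a meaningful static annotation; `typing.Type[BaseModel]` is the correct way to say "BaseModel or any subclass of it". A minimal sketch of the distinction in isolation (the `chat` body is elided and the `Person` schema is a hypothetical caller-side model, not part of this PR):

```python
from typing import Optional, Type

from pydantic import BaseModel


class Person(BaseModel):
    """Hypothetical caller-side schema for illustration only."""

    name: str = "example"


def chat(output_schema: Optional[Type[BaseModel]] = None) -> Optional[BaseModel]:
    # type(BaseModel) would evaluate at import time to BaseModel's metaclass,
    # so type checkers could not verify that callers pass a BaseModel subclass.
    # Type[BaseModel] expresses exactly what output_schema accepts: the class
    # itself (or a subclass), to be instantiated from the parsed LLM response.
    if output_schema is not None:
        return output_schema()  # stand-in for parsing the LLM response
    return None


result = chat(output_schema=Person)
print(type(result))  # <class '__main__.Person'>
```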
promptulate/config.py (68 changes: 5 additions & 63 deletions)

```diff
@@ -24,20 +24,13 @@
 from dotenv import load_dotenv

 from promptulate.hook.stdout_hook import StdOutHook
-from promptulate.utils.core_utils import get_cache
 from promptulate.utils.logger import logger
-from promptulate.utils.openai_key_pool import OpenAIKeyPool
 from promptulate.utils.singleton import Singleton

 PROXY_MODE = ["off", "custom", "promptulate"]
 load_dotenv()


-def set_enable_cache(value: bool):
-    """Caching is enabled by default. Disabling caching is not recommended."""
-    Config().enable_cache = value
-
-
 def turn_off_stdout_hook():
     Config().turn_off_stdout_hook()

@@ -47,7 +40,6 @@ class Config(metaclass=Singleton):
     Configuration class for managing application settings.

     Attributes:
-        enable_cache (bool): Flag indicating whether caching is enabled.
         _proxy_mode (str): The current proxy mode.
         _proxies (Optional[dict]): Dictionary of proxy settings.
         openai_chat_api_url (str): URL for OpenAI chat API.
@@ -58,13 +50,12 @@ class Config(metaclass=Singleton):
         ernie_bot_token_url (str): URL for Ernie bot token API.
         ernie_bot_4_url (str): URL for Ernie bot 4 API.
         ernie_embedding_v1_url (str): URL for Ernie embedding v1 API.
-        key_default_retry_times (int): Default number of key retry times.
+        max_retries (int): Maximum number of retries for llm if it fails.
         enable_stdout_hook (bool): Flag indicating whether stdout hook is enabled.
     """

     def __init__(self):
         logger.info("[pne config] Config initialization")
-        self.enable_cache: bool = True
         self._proxy_mode: str = PROXY_MODE[0]
         self._proxies: Optional[dict] = None

@@ -80,8 +71,7 @@ def __init__(self):

         self.zhipu_model_url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"

-        self.key_default_retry_times = 5
-        """If llm(like OpenAI) unable to obtain data, retry request until the data is obtained."""  # noqa: E501
+        self.max_retries = 3
         self.enable_stdout_hook = True

         if self.enable_stdout_hook:
@@ -92,43 +82,21 @@ def turn_off_stdout_hook(self):
         self.enable_stdout_hook = False
         StdOutHook.unregister_stdout_hooks()

-    def get_openai_api_key(self, model: str = "gpt-3.5-turbo") -> str:
-        """Get openai key from KeyPool and environment variable. The priority of
-        obtaining the key: environment variable, cache of OpenAIKeyPool, cache of
-        OEPNAI_API_KEY."""
+    def get_openai_api_key(self) -> str:
+        """Get OpenAI API key from environment variable."""
         if "OPENAI_API_KEY" in os.environ.keys():
-            if self.enable_cache:
-                get_cache()["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
             return os.getenv("OPENAI_API_KEY")
-
-        if self.enable_cache:
-            openai_key_pool: OpenAIKeyPool = OpenAIKeyPool()
-            key = openai_key_pool.get(model)
-            if key:
-                return key
-
-        if self.enable_cache and "OPENAI_API_KEY" in get_cache():
-            return get_cache()["OPENAI_API_KEY"]
-
         raise ValueError("OPENAI API key is not provided. Please set your key.")

     def get_ernie_api_key(self) -> str:
         if "ERNIE_API_KEY" in os.environ.keys():
-            if self.enable_cache:
-                get_cache()["ERNIE_API_KEY"] = os.getenv("ERNIE_API_KEY")
             return os.getenv("ERNIE_API_KEY")
-        if self.enable_cache and "ERNIE_API_KEY" in get_cache():
-            return get_cache()["ERNIE_API_KEY"]
         raise ValueError("ERNIE_API_KEY is not provided. Please set your key.")

     @property
     def get_ernie_api_secret(self) -> str:
         if "ERNIE_API_SECRET" in os.environ.keys():
-            if self.enable_cache:
-                get_cache()["ERNIE_API_SECRET"] = os.getenv("ERNIE_API_SECRET")
             return os.getenv("ERNIE_API_SECRET")
-        if self.enable_cache and "ERNIE_API_SECRET" in get_cache():
-            return get_cache()["ERNIE_API_SECRET"]
         raise ValueError("ERNIE_API_SECRET is not provided. Please set your secret.")

     def get_zhipuai_api_key(self) -> str:
@@ -137,11 +105,7 @@ def get_zhipuai_api_key(self) -> str:
         )

         if "ZHIPUAI_API_KEY" in os.environ.keys():
-            if self.enable_cache:
-                get_cache()["ZHIPUAI_API_KEY"] = os.getenv("ZHIPUAI_API_KEY")
             return os.getenv("ZHIPUAI_API_KEY")
-        if self.enable_cache and "ZHIPUAI_API_KEY" in get_cache():
-            return get_cache()["ZHIPUAI_API_KEY"]
         raise ValueError("ZHIPUAI_API_KEY is not provided. Please set your key.")

     def get_qianfan_ak(self):
@@ -150,11 +114,7 @@ def get_qianfan_ak(self):
         )

         if "QIANFAN_ACCESS_KEY" in os.environ.keys():
-            if self.enable_cache:
-                get_cache()["QIANFAN_ACCESS_KEY"] = os.getenv("QIANFAN_ACCESS_KEY")
             return os.getenv("QIANFAN_ACCESS_KEY")
-        if self.enable_cache and "QIANFAN_ACCESS_KEY" in get_cache():
-            return get_cache()["QIANFAN_ACCESS_KEY"]
         raise ValueError("QIANFAN_ACCESS_KEY is not provided. Please set your key.")

     def get_qianfan_sk(self):
@@ -163,11 +123,7 @@ def get_qianfan_sk(self):
         )

         if "QIANFAN_SECRET_KEY" in os.environ.keys():
-            if self.enable_cache:
-                get_cache()["QIANFAN_SECRET_KEY"] = os.getenv("QIANFAN_SECRET_KEY")
             return os.getenv("QIANFAN_SECRET_KEY")
-        if self.enable_cache and "QIANFAN_SECRET_KEY" in get_cache():
-            return get_cache()["QIANFAN_SECRET_KEY"]
         raise ValueError("QIANFAN_SECRET_KEY is not provided. Please set your secret.")

     def get_ernie_token(self) -> str:
@@ -183,35 +139,21 @@ def get_ernie_token(self) -> str:
         }
         return str(requests.post(url, params=params).json().get("access_token"))

-    def get_key_retry_times(self, model: str) -> int:
-        if self.enable_cache:
-            openai_key_pool: OpenAIKeyPool = OpenAIKeyPool()
-            return openai_key_pool.get_num(model)
-        return self.key_default_retry_times
-
     @property
     def proxy_mode(self) -> str:
-        if self.enable_cache and "PROXY_MODE" in get_cache():
-            return get_cache()["PROXY_MODE"]
         return self._proxy_mode

     @proxy_mode.setter
     def proxy_mode(self, value):
         self._proxy_mode = value
-        if self.enable_cache:
-            get_cache()["PROXY_MODE"] = value

     @property
     def proxies(self) -> Optional[dict]:
-        if self.enable_cache and "PROXIES" in get_cache():
-            return get_cache()["PROXIES"] if self.proxy_mode == "custom" else None
-        return self._proxies
+        return self._proxies if self.proxy_mode == "custom" else None

     @proxies.setter
     def proxies(self, value):
         self._proxies = value
-        if self.enable_cache:
-            get_cache()["PROXIES"] = value

     @property
     def openai_chat_request_url(self) -> str:
```
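Note: the net effect of the config.py changes is that v2 drops the key-pool and cache indirection entirely: every provider key now resolves from its environment variable alone, and per-key retry counts collapse into a single `max_retries`. A minimal sketch of the resulting lookup pattern (the generic `get_api_key` helper name is mine for illustration, not part of promptulate's API):

```python
import os


def get_api_key(env_var: str) -> str:
    """Resolve a provider key from the environment only.

    Mirrors the simplified v2 pattern above: no key pool, no cache;
    the environment variable is the single source of truth.
    """
    key = os.environ.get(env_var)
    if key is None:
        raise ValueError(f"{env_var} is not provided. Please set your key.")
    return key


os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder value for the example
print(get_api_key("OPENAI_API_KEY"))
```

This also makes the `Config` singleton stateless with respect to keys, which is a reasonable trade: rotating a key now only requires changing the environment, at the cost of losing the old multi-key load-balancing behavior.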
promptulate/frameworks/__init__.py (23 changes: 0 additions & 23 deletions)

This file was deleted.

promptulate/frameworks/conversation/__init__.py (23 changes: 0 additions & 23 deletions)

This file was deleted.