remove MNIST files and add Gemini-2.0-flash
app_config.json
@@ -1,4 +1,10 @@
 {
+    "gemini-2.0-flash":{
+        "api_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
+        "api_key": "AIzaSyCiURTUJrEGw5J7HxtL1KRNT9GhNKCJsb0",
+        "model": "gemini-2.0-flash"
+    },
+
     "deepseek-chat":{
         "api_url": "https://api.deepseek.com",
         "api_key": "sk-12165b127043441697a8940918e207ac",
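For context: the added api_url is Google's OpenAI-compatible endpoint for Gemini, so the app's existing OpenAI-style client plumbing can reach it unchanged. A minimal sketch of how such an entry could be consumed; the commit does not show the client construction, so the use of the openai package here is an assumption:

import json
from openai import OpenAI  # assumption: the app talks to all backends via the openai client

# Load the model registry exactly as the app does later in this diff
with open("app_config.json") as config_file:
    config = json.load(config_file)

entry = config["gemini-2.0-flash"]

# Gemini's /v1beta/openai/ base URL speaks the OpenAI wire protocol,
# so the same client code works for Gemini and DeepSeek alike.
client = OpenAI(base_url=entry["api_url"], api_key=entry["api_key"])

reply = client.chat.completions.create(
    model=entry["model"],
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(reply.choices[0].message.content)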
Binary files removed (the MNIST dataset files and the mnist_cnn.pth model checkpoint, per the commit message); binary contents are not shown in the diff.
@@ -23,7 +23,7 @@ def buffbot():
         st.code(f.read(), language="python")
     st.divider()
     # Select AI model for chatbot
-    model_options = ["deepseek-r1:1.5b", "llama3.2:1b", "deepseek-chat", ]
+    model_options = ["gemini-2.0-flash", "deepseek-r1:1.5b", "llama3.2:1b", "deepseek-chat", ]
     # on_change callback to clear chat history when model is changed
     selected_model = st.selectbox("**👉Please select a model to start**", model_options, on_change=clear_chat)
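The on_change=clear_chat callback is referenced but not defined in this hunk. A minimal sketch of what it presumably does, assuming the chat history lives in st.session_state under a "messages" key (both the key name and the reset behavior are assumptions):

import streamlit as st

def clear_chat():
    # Drop the stored conversation so the newly selected model starts
    # from an empty context instead of another model's history.
    st.session_state["messages"] = []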
@@ -60,11 +60,19 @@ def buffbot():
     # The API key and base URL are loaded based on the selected model
     with open('app_config.json') as config_file:
         config = json.load(config_file)
+    if selected_model == "gemini-2.0-flash":
+        api_base_url = config[selected_model]["api_url"]
+        api_key = config[selected_model]["api_key"]
+        st.info("Powered by the online [Gemini](https://deepmind.google/technologies/gemini/) API!\
+            Just a heads up, you have 10 messages to use.")
+        # Set the maximum number of user messages
+        MAX_USER_MESSAGES = 10
+
     # deepseek-chat model, online API
     if selected_model == "deepseek-chat":
         api_base_url = config[selected_model]["api_url"]
         api_key = config[selected_model]["api_key"]
         model = config[selected_model]["model"]
         st.info("Powered by the online [DeepSeek](https://www.deepseek.com/) API!\
            Just a heads up, you have 10 messages to use.")
         # Set the maximum number of user messages
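Worth flagging: unlike the deepseek-chat branch, the added gemini-2.0-flash branch never assigns model = config[selected_model]["model"], so the code presumably relies on model being set elsewhere, or this is an oversight in the commit. As for MAX_USER_MESSAGES, the cap is typically enforced against the stored session history; a hedged sketch of such a check (the "messages" key and the enforcement site are assumptions, since the diff does not show them):

import streamlit as st

MAX_USER_MESSAGES = 10  # per-model cap, as set in the diff

# Count only user turns in the stored history (key name is an assumption)
user_turns = sum(
    1 for m in st.session_state.get("messages", []) if m["role"] == "user"
)

if user_turns >= MAX_USER_MESSAGES:
    st.warning("Message limit reached. Switch models to keep chatting.")
    st.stop()  # end this script run before another API call is made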
@@ -74,7 +82,6 @@ def buffbot():
     if selected_model == "llama3.2:1b":
         api_base_url = config[selected_model]["api_url"]
         api_key = config[selected_model]["api_key"]
         model = config[selected_model]["model"]
         st.info("Powered by local llama3.2:1b model via [Ollama](https://ollama.com/library/llama3.2:1b)!\
            Just a heads up, you have 100 messages to use.")
         MAX_USER_MESSAGES = 100
@@ -82,7 +89,6 @@ def buffbot():
     if selected_model == "deepseek-r1:1.5b":
         api_base_url = config[selected_model]["api_url"]
         api_key = config[selected_model]["api_key"]
         model = config[selected_model]["model"]
         st.info("Powered by local deepseek-r1:1.5b model via [Ollama](https://ollama.com/library/deepseek-r1:1.5b)!\
            Just a heads up, you have 100 messages to use.")
         MAX_USER_MESSAGES = 100
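Both local branches above read their api_url and api_key from app_config.json just like the online ones. Ollama exposes an OpenAI-compatible endpoint at http://localhost:11434/v1 and ignores the API key (though the client requires one), so the same client path works; the config values for the local entries are not shown in this diff, so the ones below are assumptions:

from openai import OpenAI

# Assumption: the local entries in app_config.json point at Ollama's
# OpenAI-compatible endpoint; any placeholder key is accepted.
client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")

reply = client.chat.completions.create(
    model="llama3.2:1b",  # or "deepseek-r1:1.5b"; fetch first with `ollama pull <model>`
    messages=[{"role": "user", "content": "Hello!"}],
)
print(reply.choices[0].message.content)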
Additional binary files removed; contents not shown in the diff.