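"""Super tutor: a FastHTML + LlamaIndex study assistant.

Users upload study material into per-user file groups, index it into
persistent Chroma collections, chat against a selected collection through
Groq-hosted Llama models, and generate summary/topic/improvement/question
reports with Anthropic Claude.
"""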
from fasthtml.common import *
from llama_index.core import SimpleDirectoryReader, Document, VectorStoreIndex
from llama_index.core import Settings, SummaryIndex
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.groq import Groq as GroqLLamaIndex
# from llama_index.embeddings.huggingface_api import (
#     HuggingFaceInferenceAPIEmbedding,
# )
# from llama_index.llms.ollama import Ollama
from groq import Groq
from anthropic import Anthropic
from databases import db
import chromadb
import json
import logging
import os
import shutil
import threading
import time

maxletters = 30000
maxtokens = int(maxletters / 4)  # rough budget, assuming ~4 characters per token

pwd = os.getcwd()


def extractConfig(nameModel="SystemData", relPath=os.path.join(pwd, "conf/experiment_config.json"), dataOut="keyantrophics"):
    # Read a single value for `nameModel` out of the JSON experiment config.
    configPath = os.path.join(os.getcwd(), relPath)
    with open(configPath, 'r', encoding='utf-8') as file:
        config = json.load(file)[nameModel]
    Output = config[dataOut]
    return Output


keyanthropic = extractConfig(nameModel="SystemData", dataOut="keyantrophics")
keygroq = extractConfig(nameModel="SystemData", dataOut="keygroq")

client = Groq(api_key=keygroq)
os.environ["GROQ_API_KEY"] = keygroq

llm_70b = GroqLLamaIndex(model="llama-3.1-70b-versatile")
llm_localLlamma = llm_70b  # Ollama(model="llama3.2")
memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
model_emb = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
Settings.llm = llm_localLlamma  # default LLM for every LlamaIndex engine below

gridlink = Link(rel="stylesheet", href="https://cdnjs.cloudflare.com/ajax/libs/flexboxgrid/6.3.1/flexboxgrid.min.css", type="text/css")
colorpico = Link(rel="stylesheet", href="https://cdn.jsdelivr.net/npm/@picocss/pico@2/css/pico.colors.min.css")
app = FastHTML(hdrs=(picolink, gridlink, colorpico))

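# On-disk layout: static/<user>/<group>/ holds one group of uploaded files,
# and static/<user>/chroma_db holds that user's persistent Chroma collections.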
def listUsers():
    # Each top-level directory under static/ is a user.
    with os.scandir("static") as files:
        subdir = [file.name for file in files if file.is_dir()]
    return subdir

def menuusers(users):
    # Build the user-selection form; submitting it loads that user's file groups.
    T = [Option(user, value=str(user)) for user in users]
    return Form(
        H3("Seleccionar usuario", cls="col-xs-3"),
        Select(*T, name="user", cls="col-xs-3"),
        Button("Submit", type="submit", id="buttonMenuuser", cls="col-xs-6"),
        hx_post="/checkInfoSources", hx_swap="innerHTML", hx_target="#files", id="menuuser", cls="row middle-xs")

@app.post("/checkInfoSources")
|
|
def checkInfoSources(user:str):
|
|
global userdata
|
|
with os.scandir("static/"+user) as files:
|
|
subdir = [Option(file.name,value="static/"+user+"/"+file.name) for file in files if (file.is_dir() and file.name!="chroma_db") ]
|
|
|
|
userdata=user
|
|
|
|
return Form(
|
|
H3("Grupo de archivos",cls="col-xs-3"),
|
|
Select(
|
|
*subdir,name="data",cls="col-xs-3"),
|
|
Input(id="name-db", name="collection", placeholder="Enter a collection name",cls="col-xs-4"),
|
|
Button("Submit",type="submit",cls="col-xs-2"), hx_post="/createCollection",hx_swap="innerHTML",hx_target="#status" ,cls="row middle-xs")
|
|
|
|
|
|
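# Note: the same embedding model (model_emb) is used when a collection is
# built and when it is reopened; Chroma persists the vectors on disk, so
# reopening a collection does not re-embed anything.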
def create_or_load_db(path="./chroma_db", collection="init", Nodes=None, modelT=model_emb):
    # If Nodes is given, embed them into the collection; otherwise open the
    # existing collection as an index over the stored vectors.
    embed_model = HuggingFaceEmbedding(model_name=modelT)
    # embed_model = HuggingFaceInferenceAPIEmbedding(
    #     model_name="BAAI/bge-small-en-v1.5",
    #     token="hf_...",  # optional; token redacted
    # )
    chroma_client = chromadb.PersistentClient(path=path)
    chroma_collection = chroma_client.get_or_create_collection(collection)
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    if Nodes:
        index = VectorStoreIndex(
            Nodes, storage_context=storage_context, embed_model=embed_model
        )
    else:
        index = VectorStoreIndex.from_vector_store(
            vector_store,
            embed_model=embed_model,
        )
    return index

def post_process_documents(documents):
    processed_documents = []
    n = 0
    print(len(documents))
    for doc in documents:
        n += 1
        print(n)  # progress
        # 1. Text cleaning
        text = doc.text.lower()  # convert to lowercase
        # 2. Remove stopwords (placeholder list; swap in a real one as needed)
        stop_words = {"adssss"}
        tokens = text.split(" ")
        filtered_text = ' '.join([word for word in tokens if word.lower() not in stop_words])
        # 3. Custom metadata extraction (example)
        metadata = doc.metadata.copy()
        metadata['word_count'] = len(tokens)
        # 4. Create a new document with processed text and updated metadata
        processed_doc = Document(text=filtered_text, metadata=metadata)
        processed_documents.append(processed_doc)
    node_parser = SimpleNodeParser(chunk_size=360, chunk_overlap=20)
    nodes = node_parser.get_nodes_from_documents(processed_documents)
    return nodes

@app.get("/listmodelactives")
|
|
def listmodelactives():
|
|
try:
|
|
print(userdata)
|
|
except:
|
|
print("cambio")
|
|
return Div(id="options",hx_target="this",hx_swap="outerHTML",hx_get="/listmodelactives",hx_trigger="click from:#buttonMenuuser")
|
|
if not os.path.exists("static/"+userdata+"/chroma_db"):
|
|
os.makedirs("static/"+userdata+"/chroma_db")
|
|
db = chromadb.PersistentClient(path="static/"+userdata+"/chroma_db")
|
|
|
|
files= db.list_collections()
|
|
collecs = [Option(file.name, value=file.name)for file in files]
|
|
|
|
return Form(
|
|
Select(
|
|
*collecs,name="data",cls="col-xs-6"),
|
|
Button("Submit",type="submit",cls="col-xs-6"),
|
|
hx_post="/loadCollection",hx_swap="innerHTML",hx_target="#Infomodel")
|
|
|
|
@app.post("/loadCollection")
|
|
def loadCollection(data:str):
|
|
global index
|
|
global collectionloaded
|
|
collectionloaded=data
|
|
index=create_or_load_db(path="static/"+userdata+"/chroma_db",collection=data,modelT=model_emb)
|
|
return P("El usuario %s colleccion %s"%(userdata,data))
|
|
|
|
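# The four prompts below are answered with response_mode="tree_summarize",
# which recursively merges the retrieved chunks into a single answer.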
@app.post("/queryprompt")
|
|
def queryPrompt(question:str):
|
|
#index=load_create_db(collection="my_collection")
|
|
|
|
query_engine = index.as_query_engine(similarity_top_k=5,vector_store_query_mode="default",response_mode="tree_summarize")
|
|
summary_prompt = (
|
|
"Por favor, genera un resumen completo y detallado del material dado. "
|
|
"Incluye los principales temas, argumentos y conclusiones. "
|
|
"Estructura el resumen de manera coherente y organizada."
|
|
)
|
|
tematic_prompt =(
|
|
"Por favor, genera un texto donde se menciones la tematica que trata el material dado. "
|
|
"Incluye una tematica general y un indice por temas tratados "
|
|
"Estructura el texto de manera coherente y organizada."
|
|
|
|
)
|
|
issues_prompt =(
|
|
"Por favor, genera un texto donde se menciones tomando en cuenta el material y el contenido de manera detallada que mejoras podrias realizar al material incluyendo nuevos datos o corrigiendo la informacion proporcionada"
|
|
"Esta mejora hazla pensado paso a paso y de manera muy cuidadosa, tienes que dar ejemplos del materiar o referenciar directamante el texto a mejorar mencioando la causa de la mejora"
|
|
"Estructura el texto de manera coherente y organizada."
|
|
|
|
)
|
|
Question_prompt =(
|
|
"Por favor, genera un texto donde indiques preguntas sobre el material que cuando sean respondidas capturen el punto central y puntos importantes del texto"
|
|
"Estas preguntas hazla pensado paso a paso y de manera muy cuidadosa."
|
|
"Estructura el texto de manera coherente y organizada."
|
|
|
|
)
|
|
response = query_engine.query(summary_prompt)
|
|
response2 = query_engine.query(tematic_prompt)
|
|
response3 = query_engine.query(issues_prompt)
|
|
response4 = query_engine.query(Question_prompt)
|
|
Output="<H1>Resumen</H1>"+str(response)+"<H1>Tematica</H1>"+str(response2)+"<H1>Problemas</H1>"+str(response3)+"<H1>Preguntas</H1>"+str(response4)
|
|
|
|
return Output
|
|
|
|
|
|
|
|
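# Two-stage chat: first answer the question with a condense_plus_context
# chat engine over the index, then ask a second Groq completion to score
# the answer (coherence, accuracy, context relevance) as JSON.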
@app.post("/chatData")
|
|
def questionChat(message:str):
|
|
import logging
|
|
logging.basicConfig(level=logging.INFO)
|
|
contextT=(
|
|
"You are a world-class AI system. You respond to the questions about the context"
|
|
"Here are the relevant documents for the context:\n"
|
|
"{context_str}"
|
|
"\nInstruction: Use the previous chat history, or the context above, carefully examine the given context, to interact and help the user but only about {question}"
|
|
"Never mention the document of reference, talk in first person" )
|
|
#contextT="Por favor vuelve a repetir el siguiente contenido como tu respuesta:{question}"
|
|
#'chat_history', 'question'
|
|
query_engine = index.as_query_engine()
|
|
chat_engine = index.as_chat_engine(
|
|
chat_mode="condense_plus_context",
|
|
query_engine=query_engine,
|
|
memory=memory,
|
|
llm=llm_localLlamma,
|
|
context_prompt=contextT,
|
|
similarity_top_k=5,
|
|
verbose=True,
|
|
)
|
|
response = chat_engine.chat(message)
|
|
ContextNodes=""
|
|
for node in response.source_nodes:
|
|
ContextNodes=ContextNodes+node.node.text+"\n"
|
|
#print(f"Texto del nodo: {node.node.text}")
|
|
#print(f"Puntuación de relevancia: {node.score}")
|
|
#print("---")
|
|
|
|
|
|
|
|
|
|
NewPrompt="""The previous response is:
|
|
%s
|
|
The previous context is:
|
|
%s
|
|
Evaluate the coherence and accuracy of previous response to respond %s in this evaluation.
|
|
Verificate if previous context is related to the previous response, if not, say that you do not have information about that issue
|
|
The format of output is a json with keys 'coherencia', 'exactitud', 'relacion_con_el_contexto' and 'comentario' .
|
|
'coherencia', 'exactitud', 'relacion_con_el_contexto' are numeric variables with max value is 10"""%(response,ContextNodes,message)
|
|
|
|
chat_completion = client.chat.completions.create(
|
|
#
|
|
# Required parameters
|
|
#
|
|
messages=[
|
|
# Set an optional system message. This sets the behavior of the
|
|
# assistant and can be used to provide specific instructions for
|
|
# how it should behave throughout the conversation.
|
|
{
|
|
"role": "system",
|
|
"content": "you are a helpful assistant."
|
|
},
|
|
# Set a user message for the assistant to respond to.
|
|
{
|
|
"role": "user",
|
|
"content": NewPrompt,
|
|
}
|
|
],
|
|
|
|
# The language model which will generate the completion.
|
|
model="llama-3.1-70b-versatile",
|
|
|
|
#
|
|
# Optional parameters
|
|
#
|
|
|
|
# Controls randomness: lowering results in less random completions.
|
|
# As the temperature approaches zero, the model will become deterministic
|
|
# and repetitive.
|
|
temperature=0.5,
|
|
|
|
# The maximum number of tokens to generate. Requests can use up to
|
|
# 32,768 tokens shared between prompt and completion.
|
|
max_tokens=1024,
|
|
|
|
# Controls diversity via nucleus sampling: 0.5 means half of all
|
|
# likelihood-weighted options are considered.
|
|
top_p=1,
|
|
|
|
# A stop sequence is a predefined or user-specified text string that
|
|
# signals an AI to stop generating content, ensuring its responses
|
|
# remain focused and concise. Examples include punctuation marks and
|
|
# markers like "[end]".
|
|
stop=None,
|
|
|
|
# If set, partial message deltas will be sent.
|
|
stream=False,
|
|
)
|
|
|
|
|
|
return H6(message),P(response,cls="pico-color-pink-500"),P(chat_completion.choices[0].message.content,cls="pico-color-pink-500")
|
|
|
|
@app.get("/SummarySources")
|
|
def SummarySources():
|
|
with os.scandir("static/"+userdata) as files:
|
|
subdir = [Option(file.name,value="static/"+userdata+"/"+file.name) for file in files if file.is_file()]
|
|
return Form("Este es muy caro para documentos grandes y tarda mucho",
|
|
Select(
|
|
*subdir,name="data"),
|
|
Input( name="query", placeholder="Dar una pregunta"),
|
|
Button("Submit",type="submit"), hx_post="/SummaryMake",hx_swap="innerHTML",hx_target="#summaryR" )
|
|
|
|
@app.post("/SummaryMake")
|
|
def SummaryMake(data:str,query:str):
|
|
print(data,query)
|
|
docs = SimpleDirectoryReader(
|
|
input_dir=[data]
|
|
).load_data()
|
|
print("p1")
|
|
summary_index = SummaryIndex.from_documents(docs)
|
|
print("p2")
|
|
summary_engine = summary_index.as_query_engine()
|
|
print("p3")
|
|
response = summary_engine.query(
|
|
query
|
|
)
|
|
print("p4")
|
|
return P(response)
|
|
|
|
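# Thin Anthropic wrapper: returns the generated text together with the
# wall-clock time the call took.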
def getTextCustom(system="", content="", max_tokens=maxtokens, model="claude-3-5-sonnet-20240620"):
    t = time.time()
    client = Anthropic(api_key=keyanthropic)
    message = [{"role": "user", "content": content}]
    if system != "":
        completionv2 = client.messages.create(
            model=model,
            system=system,
            max_tokens=max_tokens,
            messages=message,
        )
    else:
        completionv2 = client.messages.create(
            model=model,
            max_tokens=max_tokens,
            messages=message,
        )
    return {"content": completionv2.content[0].text, "time": time.time() - t}

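# Map stage of a map-reduce pipeline over the documents: accumulate text up
# to the character budget, run the selected prompt on each chunk with
# Claude, and collect the per-chunk results.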
def processAllDocs(docs,type="summary"):
|
|
summary_prompt = """Por favor, genera un resumen completo y detallado del material dado.
|
|
Incluye los principales temas, argumentos y conclusiones.
|
|
Estructura el resumen de manera coherente y organizada.
|
|
Texto a resumir:
|
|
%s
|
|
"""
|
|
tematic_prompt =""" Por favor, genera un texto donde se menciones la tematica que trata el material dado.
|
|
Incluye una tematica general y un indice por temas tratados
|
|
Estructura el texto de manera coherente y organizada.
|
|
Texto a usar:
|
|
%s
|
|
"""
|
|
issues_prompt ="""Por favor, genera un texto donde se menciones tomando en cuenta el material y el contenido de manera detallada que mejoras podrias realizar al material incluyendo nuevos datos o corrigiendo la informacion proporcionada
|
|
Esta mejora hazla pensado paso a paso y de manera muy cuidadosa, tienes que dar ejemplos del materiar o referenciar directamante el texto a mejorar mencioando la causa de la mejora
|
|
Estructura el texto de manera coherente y organizada.
|
|
Texto a usar:
|
|
%s
|
|
"""
|
|
|
|
|
|
question_prompt ="""Por favor, genera un texto donde indiques preguntas sobre el material que cuando sean respondidas capturen el punto central y puntos importantes del texto
|
|
Estas preguntas hazla pensado paso a paso y de manera muy cuidadosa.
|
|
Estructura el texto de manera coherente y organizada.
|
|
Texto a usar:
|
|
%s
|
|
"""
|
|
temp=""
|
|
AllChunks=[]
|
|
AllOut=[]
|
|
for doc in docs:
|
|
temp=temp+doc.get_text()
|
|
if len(temp)>maxtokens:
|
|
AllChunks.append(temp)
|
|
if type=="summary":
|
|
procesedInfo=getTextCustom(content= summary_prompt%temp)
|
|
elif type=="tematic":
|
|
procesedInfo=getTextCustom(content= tematic_prompt%temp)
|
|
elif type=="issues":
|
|
procesedInfo=getTextCustom(content= issues_prompt%temp)
|
|
elif type=="question":
|
|
procesedInfo=getTextCustom(content= question_prompt%temp)
|
|
AllOut.append(procesedInfo)
|
|
temp=""
|
|
if temp!="":
|
|
AllChunks.append(temp)
|
|
if type=="summary":
|
|
procesedInfo=getTextCustom(content= summary_prompt%temp)
|
|
elif type=="tematic":
|
|
procesedInfo=getTextCustom(content= tematic_prompt%temp)
|
|
elif type=="issues":
|
|
procesedInfo=getTextCustom(content= issues_prompt%temp)
|
|
elif type=="question":
|
|
procesedInfo=getTextCustom(content= question_prompt%temp)
|
|
AllOut.append(procesedInfo)
|
|
temp=""
|
|
|
|
|
|
|
|
return AllOut
|
|
|
|
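# Reduce stage: concatenate the per-chunk outputs (separated by a rule) and
# re-prompt, recursing until a single reduced text remains.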
def reduceDocs(AllOut,type="summary"):
|
|
if len(AllOut)==1:
|
|
return AllOut[0]["content"]
|
|
if len(AllOut)==0:
|
|
return "Hay un problema en este campo"
|
|
summary_prompt = """Por favor, de los siguientes resumenes genera un resumen general
|
|
Incluye los principales temas, argumentos y conclusiones.
|
|
Estructura el resumen de manera coherente y organizada.
|
|
Textos a resumir:
|
|
%s
|
|
"""
|
|
tematic_prompt =""" Por favor, de los siguientes textos genera un texto general sobre la tematica.
|
|
Incluye una tematica general y un indice por temas tratados
|
|
Estructura el texto de manera coherente y organizada.
|
|
Textos a usar:
|
|
%s
|
|
"""
|
|
issues_prompt ="""Por favor, genera un texto donde se incluyan todas las mejoras o correcciones presentadas
|
|
Estas mejoras hazla pensado paso a paso y de manera muy cuidadosasi hay ejemplos incluyelos o referenciar directamante el texto a mejorar mencioando la causa de la mejora
|
|
Estructura el texto de manera coherente y organizada.
|
|
Textos a usar:
|
|
%s
|
|
"""
|
|
|
|
|
|
question_prompt ="""Por favor, genera un texto donde indiques preguntas sobre el material que cuando sean respondidas capturen el punto central y puntos importantes del texto
|
|
Estas preguntas hazla pensado paso a paso y de manera muy cuidadosa.
|
|
Estructura el texto de manera coherente y organizada.
|
|
Texto a usar:
|
|
%s
|
|
"""
|
|
sep="""________________________________________________
|
|
"""
|
|
temp=""
|
|
Allreduce=[]
|
|
if len(AllOut)>1:
|
|
for reduce in AllOut:
|
|
temp=temp+reduce+sep
|
|
if len(temp)>maxtokens:
|
|
|
|
if type=="summary":
|
|
procesedInfo=getTextCustom(content= summary_prompt%temp)
|
|
elif type=="tematic":
|
|
procesedInfo=getTextCustom(content= tematic_prompt%temp)
|
|
elif type=="issues":
|
|
procesedInfo=getTextCustom(content= issues_prompt%temp)
|
|
elif type=="question":
|
|
procesedInfo=getTextCustom(content= question_prompt%temp)
|
|
Allreduce.append(procesedInfo)
|
|
temp=""
|
|
if temp!="":
|
|
|
|
if type=="summary":
|
|
procesedInfo=getTextCustom(content= summary_prompt%temp)
|
|
elif type=="tematic":
|
|
procesedInfo=getTextCustom(content= tematic_prompt%temp)
|
|
elif type=="issues":
|
|
procesedInfo=getTextCustom(content= issues_prompt%temp)
|
|
elif type=="question":
|
|
procesedInfo=getTextCustom(content= question_prompt%temp)
|
|
Allreduce.append(procesedInfo)
|
|
temp=""
|
|
print(Allreduce)
|
|
# while len(Allreduce)>1:
|
|
# print(Allreduce)
|
|
# Allreduce=reduceDocs(Allreduce)
|
|
# print("P2",Allreduce)
|
|
# if len(Allreduce)==1:
|
|
# return Allreduce
|
|
|
|
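# Build the four reports for a collection and upsert them into the datacoll
# table keyed by (user, collection).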
def savedb(docs, userdata, collection):
    Output = {}
    for procs in ["summary", "tematic", "issues", "question"]:
        AllOut = processAllDocs(docs, type=procs)
        Allreduce = reduceDocs(AllOut, type=procs)
        Output[procs] = Allreduce
    if db((db.datacoll.user == userdata) & (db.datacoll.collection == collection)).count() == 0:
        Output["user"] = userdata
        Output["collection"] = collection
        db.datacoll.insert(**Output)
        db.commit()
    else:
        db((db.datacoll.user == userdata) & (db.datacoll.collection == collection)).update(**Output)
        db.commit()

@app.get("/showdata")
|
|
def showData():
|
|
userdata,collectionloaded
|
|
rows=db((db.datacoll.user == userdata)&(db.datacoll.collection == collectionloaded)).select()
|
|
first_row = rows.first()
|
|
return P(first_row.summary),P(first_row.tematic),P(first_row.issues),P(first_row.question)
|
|
|
|
|
|
|
|
|
|
|
|
@app.post("/createCollection")
|
|
def createCollection(data:str,collection:str):
|
|
print("Reading")
|
|
docs = SimpleDirectoryReader(
|
|
input_dir=data
|
|
).load_data()
|
|
print("Process Documents")
|
|
savedb(docs,userdata,collection)
|
|
|
|
Nodes=post_process_documents(docs)
|
|
print("create DB")
|
|
|
|
# class MyThread(threading.Thread):
|
|
# def run(self):
|
|
# print("Hilo")
|
|
# create_or_load_db(path="static/"+data.split("/")[1]+"/chroma_db",collection=collection,Nodes=Nodes,modelT=model_emb)
|
|
|
|
# # create and start the thread
|
|
# global t
|
|
# t = MyThread()
|
|
# t.start()
|
|
# global t_time
|
|
# t_time=time.time()
|
|
return Div("Iniciando carga de datos")
|
|
|
|
@app.get("/is_busy")
|
|
def is_busy():
|
|
try:
|
|
Busy= t.is_alive()
|
|
except:
|
|
Busy=False
|
|
if not Busy:
|
|
return H2("Estado: Disponible para carga de datos")
|
|
else:
|
|
return H2("Esta ocupados desde hace %s , este es un proceso largo"%(str(time.time()-t_time)))
|
|
|
|
|
|
|
|
@app.get("/")
|
|
def home():
|
|
|
|
page = Title('Super tutor'),Main(
|
|
Div(H1('Super tutor'),
|
|
menuusers(listUsers()),
|
|
#A('A link', href='https://example.com'),
|
|
#Img(src="https://placehold.co/200"),
|
|
Div("Archivos",id="files"),
|
|
|
|
Div(H2("Estado:Disponible para carga"),id="status",hx_target="this",hx_swap="innerHTML",hx_get="/is_busy",hx_trigger="every 60000ms"),
|
|
Div(
|
|
Div(Div(id="options",hx_target="this",hx_swap="outerHTML",hx_get="/listmodelactives",hx_trigger="click from:#buttonMenuuser delay:3s"),cls="col-xs-12"),
|
|
Div(Div(id="Infomodel"),cls="col-xs-12"),
|
|
#Div("Resumen",Div(id="summary",hx_target="this",hx_swap="outerHTML",hx_get="/SummarySources",hx_trigger="click from:#buttonMenuuser"),Div(id="summaryR")),
|
|
Div(H3("Chat para preguntarle al material de estudios "),
|
|
Div(
|
|
Form(
|
|
Input(id="question", name="message", placeholder="Dar una pregunta"),
|
|
Button("Submit",type="submit"), hx_post="/chatData",hx_swap="afterend",hx_target="#questionR" ),
|
|
Div(id="questionR")
|
|
,id="questions"),
|
|
cls="col-xs-6"),
|
|
Div(H3("Este genera información general del material, pero es intensivo en uso del api, aunque ahora esta restingido a solo 5 solicitudes el objeitvo original era enviar todo el material."),
|
|
Div(hx_target="this",hx_swap="innerHTML",hx_get="/showdata",hx_trigger="every 60000ms",id="query"),
|
|
id="chatbot",cls="col-xs-6"),
|
|
cls="row", style="color: #fff;")
|
|
))
|
|
return page
|
|
|
|
@app.get("/fileup")
|
|
def fileup():
|
|
return Div(
|
|
P('Upload data Super tutor'),
|
|
Div(H2("Subir Archivos"),Form(
|
|
Input(type='file', name='file',cls="col-xs-3"),
|
|
Input( name='dir',placeholder="Enter a directory",cls="col-xs-2"),
|
|
Input( name='user',placeholder="Enter a user",cls="col-xs-2"),
|
|
Button('Upload', type='submit',cls="col-xs-4"),
|
|
hx_post="/upload",
|
|
hx_target="#info",
|
|
hx_swap="innerHTML",
|
|
enctype="multipart/form-data",
|
|
),cls="col-xs-12"),
|
|
Div(H2("Crear Usuario"),Form(
|
|
Input( name='user',placeholder="Enter a user",cls="col-xs-7"),
|
|
Button('Upload', type='submit',cls="col-xs-5"),
|
|
hx_post="/createuser",
|
|
hx_target="#info",
|
|
hx_swap="innerHTML",
|
|
enctype="multipart/form-data",
|
|
),cls="col-xs-12"),
|
|
Div(H2("Crear grupo de Archivos"),Form(
|
|
Input( name='dir',placeholder="Enter a directory",cls="col-xs-4"),
|
|
Input( name='user',placeholder="Enter a user",cls="col-xs-3"),
|
|
Button('Upload', type='submit',cls="col-xs-5"),
|
|
hx_post="/creategroup",
|
|
hx_target="#info",
|
|
hx_swap="innerHTML",
|
|
enctype="multipart/form-data",
|
|
),cls="col-xs-12"),
|
|
Div(id="info")
|
|
|
|
|
|
)
|
|
|
|
|
|
@app.post("/upload")
|
|
def upload(file: UploadFile,dir : str = Form(...),user : str = Form(...)):
|
|
filenameB="static/"+user+"/"+dir+"/"+file.filename
|
|
pathB="static/"+user+"/"+dir
|
|
if not os.path.exists(pathB):
|
|
os.makedirs(pathB)
|
|
if not os.path.exists(filenameB):
|
|
with open(f"{filenameB}", "wb") as buffer:
|
|
shutil.copyfileobj(file.file, buffer)
|
|
return P("Se ha subido %s"%(filenameB))
|
|
|
|
|
|
@app.post("/createuser")
|
|
def createuser(user : str = Form(...)):
|
|
pathB="static/"+user
|
|
if not os.path.exists(pathB):
|
|
os.makedirs(pathB)
|
|
return P("Se ha creado %s"%(pathB))
|
|
|
|
@app.post("/creategroup")
|
|
def createuser(user : str = Form(...),dir : str = Form(...)):
|
|
pathB="static/"+user+"/"+dir
|
|
if not os.path.exists(pathB):
|
|
os.makedirs(pathB)
|
|
return P("Se ha creado %s"%(pathB))
|
|
|
|
|
|
app.mount("/static", StaticFiles(directory="static"), name="static")
|
|
|
|
|
|
|
|
|
|
serve()
|
|
|
|
|