Add is_creative_request; reorganize books.

This commit is contained in:
2026-04-07 16:21:02 -04:00
parent 9ad547f9cc
commit c818c43dcc
37 changed files with 15425 additions and 23 deletions

View File

@@ -83,7 +83,7 @@ import torch
# -------------------------
# Knowledge base selection
# -------------------------
BOOK_DIR = 'Books/SongWriting' # just a string
BOOK_DIR = 'Books/Accounting' # just a string
book_files = []
for f in Path(BOOK_DIR).rglob('*'):
@@ -584,6 +584,19 @@ def truncate_at_sentence(text, max_chars):
return truncated[:last_period + 1] if last_period > 0 else truncated
# -------------------------
# Determine whether the question is asking for a creative or a factual response
# -------------------------
def is_creative_request(question):
    """Heuristically classify *question* as a creative-writing request.

    Performs a case-insensitive substring scan for songwriting/creative
    trigger phrases.  Note this is deliberately loose: "open" also matches
    "opened", "begin" matches "beginning", etc.

    Args:
        question: The user's question text.

    Returns:
        True if any creative trigger phrase occurs in the question,
        False otherwise.
    """
    creative_markers = (
        "suggest", "write", "complete", "finish", "rhyme", "next line",
        "come up with", "give me", "idea for", "open", "start", "begin",
        "chorus", "verse", "bridge", "hook", "lyric", "lyrics",
        "continue", "follow", "what comes", "how might", "how would",
    )
    text = question.lower()
    for marker in creative_markers:
        if marker in text:
            return True
    return False
# -------------------------
# Ask question
# -------------------------
@@ -600,10 +613,8 @@ def ask_question(question, show_sources=False, filter_term=None):
print(chunk[:300])
print("--- End chunks ---\n")
# context = " ".join(top_chunks)[:level_cfg["context_len"]]
context = truncate_at_sentence(" ".join(top_chunks), level_cfg["context_len"])
# Build conversation history string
history_text = ""
if conversation_history:
history_text = "Previous conversation:\n"
@@ -612,31 +623,42 @@ def ask_question(question, show_sources=False, filter_term=None):
history_text += f"A: {exchange['answer']}\n"
history_text += "\n"
prompt = (
f"You are a helpful research assistant. "
f"Answer the question using ONLY the provided context. "
f"Be direct and concise. "
f"Only say 'I don't know' if the context contains absolutely nothing relevant. "
f"Do not reference outside sources. "
f"Do not repeat or echo the conversation history in your answer. "
f"Do not include labels, separator lines, or notes in your answer. "
f"Stop immediately after answering, ending on a complete sentence."
if is_creative_request(question):
prompt_instruction = (
"You are a creative songwriting assistant. "
"Use the provided context as inspiration and technique guidance. "
"Generate original creative suggestions. "
"Be concise. Do not reproduce the context. "
"End your response with a single period."
)
else:
prompt_instruction = (
"You are a helpful research assistant. "
"Answer using ONLY the provided context. "
"Be direct and concise. Never repeat the context or instructions. "
"Never echo the question. End your answer with a single period."
)
if history_text:
prompt += f"HISTORY:\n{history_text}\n"
prompt += (
f"CONTEXT:\n{context}\n\n"
f"QUESTION: {question}\n\n"
f"ANSWER:"
)
with lm_model.chat_session():
response = lm_model.generate(prompt, max_tokens=level_cfg["max_tokens"])
with lm_model.chat_session(system_prompt=prompt_instruction):
user_message = (
f"{history_text}"
f"CONTEXT:\n{context}\n\n"
f"QUESTION: {question}\n\n"
f"ANSWER:"
)
response = lm_model.generate(
user_message,
max_tokens=level_cfg["max_tokens"]
)
answer = response.strip()
# Strip any runaway stop markers and everything after them
stop_markers = ["###", "####", "END OF ANSWER", "Final Answer", "STOP"]
for marker in stop_markers:
if marker in answer:
answer = answer[:answer.index(marker)].strip()
conversation_history.append({
"question": question,
"answer": answer