Feature: Add detailed LLM interaction logging for debugging

parent 54593c31da
commit d547f472ca

llm.go: 71 additions
@@ -197,6 +197,12 @@ func processArticleBatch(ollamaURL, model string, articles []Article) (map[strin
 	logInfo("api", "Sending request to Ollama", requestDetails)
 	startTime := time.Now()
 
+	// Log the complete batch prompt for debugging
+	logInfo("summarize_prompt", "Complete prompt sent to LLM for summarization", map[string]interface{}{
+		"batchSize": len(articles),
+		"prompt":    batchPrompt.String(),
+	})
+
 	// Pretty print request JSON for logging
 	payloadBytes, err := json.MarshalIndent(payload, "", " ")
 	if err != nil {
@@ -206,6 +212,15 @@ func processArticleBatch(ollamaURL, model string, articles []Article) (map[strin
 		return nil, err
 	}
 
+	// Log the request payload
+	logInfo("summarize_request", "Summarization API request", map[string]interface{}{
+		"model":       model,
+		"batchSize":   len(articles),
+		"apiEndpoint": ollamaURL + "/api/chat",
+		"payload":     string(payloadBytes),
+		"articleIDs":  extractArticleIDs(articles),
+	})
+
 	req, err := http.NewRequest("POST", ollamaURL+"/api/chat", bytes.NewReader(payloadBytes))
 	if err != nil {
 		logInfo("api", "Request creation error", map[string]interface{}{
@@ -235,6 +250,14 @@ func processArticleBatch(ollamaURL, model string, articles []Article) (map[strin
 	bodyBytes, _ := io.ReadAll(resp.Body)
 	apiDuration := time.Since(startTime)
 
+	// Log the raw response for debugging
+	logInfo("summarize_response", "Raw LLM response for summarization", map[string]interface{}{
+		"statusCode":   resp.StatusCode,
+		"response":     string(bodyBytes),
+		"durationMs":   apiDuration.Milliseconds(),
+		"msPerArticle": apiDuration.Milliseconds() / int64(len(articles)),
+	})
+
 	// Pretty print response JSON for debugging
 	var prettyJSON bytes.Buffer
 	err = json.Indent(&prettyJSON, bodyBytes, "", " ")
@@ -404,6 +427,12 @@ DO NOT include any other text, explanation, or formatting in your response, ONLY
 		strings.Join(recentMessages, "\n"),
 	)
 
+	// Log the complete prompt for debugging purposes
+	logInfo("redundancy_prompt", "Complete prompt sent to LLM for redundancy check", map[string]interface{}{
+		"candidateID": candidate.ID,
+		"prompt":      prompt,
+	})
+
 	// Get the URL for the Ollama API
 	ollamaURL := os.Getenv("OLLAMA_URL")
 	if ollamaURL == "" {
@@ -433,6 +462,16 @@ DO NOT include any other text, explanation, or formatting in your response, ONLY
 		return false, "", fmt.Errorf("error marshaling request: %v", err)
 	}
 
+	// Log the request payload
+	var prettyPayload bytes.Buffer
+	if err := json.Indent(&prettyPayload, payloadBytes, "", " "); err == nil {
+		logInfo("redundancy_request", "Redundancy check API request", map[string]interface{}{
+			"candidateID": candidate.ID,
+			"payload":     prettyPayload.String(),
+			"url":         ollamaURL + "/api/chat",
+		})
+	}
+
 	// Create the request
 	req, err := http.NewRequest("POST", ollamaURL+"/api/chat", bytes.NewReader(payloadBytes))
 	if err != nil {
@@ -483,6 +522,23 @@ DO NOT include any other text, explanation, or formatting in your response, ONLY
 		return false, "Failed to read redundancy check response", nil
 	}
 
+	// Log the raw response for debugging
+	var prettyResponse bytes.Buffer
+	if err := json.Indent(&prettyResponse, bodyBytes, "", " "); err == nil {
+		logInfo("redundancy_response", "Raw LLM response for redundancy check", map[string]interface{}{
+			"candidateID": candidate.ID,
+			"statusCode":  resp.StatusCode,
+			"response":    prettyResponse.String(),
+		})
+	} else {
+		// If we can't pretty print, log as is
+		logInfo("redundancy_response", "Raw LLM response for redundancy check (not JSON)", map[string]interface{}{
+			"candidateID": candidate.ID,
+			"statusCode":  resp.StatusCode,
+			"response":    string(bodyBytes),
+		})
+	}
+
 	// Parse the response
 	var result struct {
 		Message struct {
@@ -501,6 +557,12 @@ DO NOT include any other text, explanation, or formatting in your response, ONLY
 	content := result.Message.Content
 	content = strings.TrimSpace(content)
 
+	// Log the extracted content
+	logInfo("redundancy_content", "Extracted content from LLM response", map[string]interface{}{
+		"candidateID": candidate.ID,
+		"content":     content,
+	})
+
 	// Handle case where the response might be wrapped in markdown code blocks
 	if strings.HasPrefix(content, "```json") {
 		content = strings.TrimPrefix(content, "```json")
@@ -551,3 +613,12 @@ DO NOT include any other text, explanation, or formatting in your response, ONLY
 
 	return redundancyResult.IsRedundant, redundancyResult.Reason, nil
 }
+
+// Helper function to extract article IDs for logging
+func extractArticleIDs(articles []Article) []string {
+	ids := make([]string, len(articles))
+	for i, article := range articles {
+		ids[i] = article.ID
+	}
+	return ids
+}
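
Note: every new log call in this diff goes through a logInfo helper that is defined elsewhere in the repository; only its call sites appear here. Below is a minimal sketch of the shape those call sites imply. The three-argument signature is inferred from the calls above; the structured-JSON output, the field names ("time", "level", "category", "message"), and the use of the standard log package are assumptions for illustration, not the project's actual implementation.

	// Hypothetical sketch of the logInfo helper implied by the call sites.
	// Only the signature is inferred from the diff; the record layout and
	// output sink are assumed. Requires encoding/json, log, and time.
	func logInfo(category, message string, fields map[string]interface{}) {
		entry := map[string]interface{}{
			"time":     time.Now().Format(time.RFC3339),
			"level":    "info",
			"category": category,
			"message":  message,
		}
		for k, v := range fields {
			entry[k] = v // merge call-site fields into the record
		}
		if b, err := json.Marshal(entry); err == nil {
			log.Println(string(b))
		}
	}

One design consequence worth noting: the summarize_prompt, summarize_request, and summarize_response entries each embed the full prompt, payload, or raw response body, so log volume grows with batch size; gating these verbose categories behind a debug flag would be a natural follow-up if that becomes a problem.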