|
|
|
@ -9,7 +9,7 @@ import ( |
|
|
|
"log" |
|
|
|
"math" |
|
|
|
"net/http" |
|
|
|
"sort" |
|
|
|
"strconv" |
|
|
|
"strings" |
|
|
|
"sync" |
|
|
|
"time" |
|
|
|
@ -19,6 +19,30 @@ import ( |
|
|
|
"marmic/servicetrade-toolbox/internal/middleware" |
|
|
|
) |
|
|
|
|
|
|
|
// UploadResult represents the result of uploading a single file to a
// single ServiceTrade job.
type UploadResult struct {
	JobID    string                 // job the file was uploaded against
	DocName  string                 // original document/file name
	Success  bool                   // true when the upload completed without error
	Error    string                 // failure reason; empty when Success is true
	Data     map[string]interface{} // response payload from the upload call — TODO confirm exact shape
	FileSize int64                  // size of the uploaded file in bytes
}
|
|
|
|
|
|
|
// UploadSession stores upload results for pagination.
// One session is created per upload batch and later looked up by
// session ID when the user pages through the results.
type UploadSession struct {
	Results            []UploadResult // every per-file result in the batch
	TotalJobs          int            // number of distinct jobs targeted by the batch
	TotalSuccess       int            // count of results with Success == true
	TotalFailure       int            // count of results with Success == false
	TotalBytesUploaded int64          // sum of FileSize over successful uploads
	TotalTime          time.Duration  // wall-clock duration of the whole batch
	CreatedAt          time.Time      // creation time — presumably intended for expiry; no cleanup is visible here, confirm
}
|
|
|
|
|
|
|
// Global map to store upload sessions (in production, use Redis or database).
// Keyed by the session ID generated at upload time; read back by the
// pagination handler.
//
// NOTE(review): this map is written and read from HTTP handler goroutines
// with no visible synchronization, and entries are never deleted — confirm
// whether a mutex and an expiry sweep are needed.
var uploadSessions = make(map[string]UploadSession)
|
|
|
|
|
|
|
// DocumentsHandler handles the document upload page
|
|
|
|
func DocumentsHandler(w http.ResponseWriter, r *http.Request) { |
|
|
|
session, ok := r.Context().Value(middleware.SessionKey).(*api.Session) |
|
|
|
@ -312,15 +336,6 @@ func UploadDocumentsHandler(w http.ResponseWriter, r *http.Request) { |
|
|
|
const maxConcurrent = 5 |
|
|
|
const requestDelay = 300 * time.Millisecond |
|
|
|
|
|
|
|
type UploadResult struct { |
|
|
|
JobID string |
|
|
|
DocName string |
|
|
|
Success bool |
|
|
|
Error string |
|
|
|
Data map[string]interface{} |
|
|
|
FileSize int64 |
|
|
|
} |
|
|
|
|
|
|
|
totalUploads := len(jobs) * activeFilesProcessedCount |
|
|
|
resultsChan := make(chan UploadResult, totalUploads) |
|
|
|
var wg sync.WaitGroup |
|
|
|
@ -412,115 +427,139 @@ func UploadDocumentsHandler(w http.ResponseWriter, r *http.Request) { |
|
|
|
log.Println("All upload goroutines finished.") |
|
|
|
}() |
|
|
|
|
|
|
|
results := make(map[string][]UploadResult) |
|
|
|
resultsCount := 0 |
|
|
|
// Collect all results
|
|
|
|
var allResults []UploadResult |
|
|
|
var totalBytesUploaded int64 |
|
|
|
|
|
|
|
for result := range resultsChan { |
|
|
|
resultsCount++ |
|
|
|
log.Printf("Received result %d/%d: Job %s, File %s, Success: %v, Size: %.2f MB", |
|
|
|
resultsCount, totalUploads, result.JobID, result.DocName, result.Success, |
|
|
|
float64(result.FileSize)/(1024*1024)) |
|
|
|
log.Printf("Received result: Job %s, File %s, Success: %v, Size: %.2f MB", |
|
|
|
result.JobID, result.DocName, result.Success, float64(result.FileSize)/(1024*1024)) |
|
|
|
if result.Success { |
|
|
|
totalBytesUploaded += result.FileSize |
|
|
|
} |
|
|
|
if _, exists := results[result.JobID]; !exists { |
|
|
|
results[result.JobID] = []UploadResult{} |
|
|
|
} |
|
|
|
results[result.JobID] = append(results[result.JobID], result) |
|
|
|
allResults = append(allResults, result) |
|
|
|
} |
|
|
|
|
|
|
|
// Calculate total duration
|
|
|
|
totalDuration := time.Since(startTime) |
|
|
|
log.Printf("All results collected. Total: %d, Total bytes uploaded: %.2f MB, Total time: %v", |
|
|
|
resultsCount, float64(totalBytesUploaded)/(1024*1024), totalDuration) |
|
|
|
|
|
|
|
var resultHTML bytes.Buffer |
|
|
|
var totalSuccess, totalFailure int |
|
|
|
for _, jobResults := range results { |
|
|
|
for _, result := range jobResults { |
|
|
|
if result.Success { |
|
|
|
totalSuccess++ |
|
|
|
} else { |
|
|
|
totalFailure++ |
|
|
|
} |
|
|
|
len(allResults), float64(totalBytesUploaded)/(1024*1024), totalDuration) |
|
|
|
|
|
|
|
// Store results in session for pagination
|
|
|
|
sessionID := fmt.Sprintf("upload_%d", time.Now().UnixNano()) |
|
|
|
uploadSession := UploadSession{ |
|
|
|
Results: allResults, |
|
|
|
TotalJobs: len(jobs), |
|
|
|
TotalSuccess: 0, |
|
|
|
TotalFailure: 0, |
|
|
|
TotalBytesUploaded: totalBytesUploaded, |
|
|
|
TotalTime: totalDuration, |
|
|
|
CreatedAt: time.Now(), |
|
|
|
} |
|
|
|
|
|
|
|
// Calculate totals
|
|
|
|
for _, result := range allResults { |
|
|
|
if result.Success { |
|
|
|
uploadSession.TotalSuccess++ |
|
|
|
} else { |
|
|
|
uploadSession.TotalFailure++ |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
resultHTML.WriteString("<div class=\"upload-summary\">") |
|
|
|
resultHTML.WriteString("<h3>Upload Results</h3>") |
|
|
|
resultHTML.WriteString("<div class=\"upload-stats\">") |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<div class=\"stat-box\"><div class=\"stat-value\">%d</div><div class=\"stat-label\">Total Jobs</div></div>", len(results))) |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<div class=\"stat-box success-stat\"><div class=\"stat-value\">%d</div><div class=\"stat-label\">Successful Uploads</div></div>", totalSuccess)) |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<div class=\"stat-box error-stat\"><div class=\"stat-value\">%d</div><div class=\"stat-label\">Failed Uploads</div></div>", totalFailure)) |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<div class=\"stat-box\"><div class=\"stat-value\">%d</div><div class=\"stat-label\">Files Processed</div></div>", resultsCount)) |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<div class=\"stat-box\"><div class=\"stat-value\">%v</div><div class=\"stat-label\">Total Time</div></div>", totalDuration)) |
|
|
|
// Store in global map (in production, use Redis or database)
|
|
|
|
uploadSessions[sessionID] = uploadSession |
|
|
|
|
|
|
|
if totalFailure == 0 && resultsCount > 0 { |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<p>All documents were successfully uploaded to ServiceTrade in %v!</p>", totalDuration)) |
|
|
|
} else if resultsCount == 0 { |
|
|
|
resultHTML.WriteString("<p>No documents were processed for upload.</p>") |
|
|
|
} else { |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<p>Some documents failed to upload. Process completed in %v. See details below.</p>", totalDuration)) |
|
|
|
} |
|
|
|
resultHTML.WriteString("</div>") |
|
|
|
|
|
|
|
resultHTML.WriteString("<div class=\"job-results\">") |
|
|
|
sortedJobs := make([]string, 0, len(results)) |
|
|
|
for jobID := range results { |
|
|
|
sortedJobs = append(sortedJobs, jobID) |
|
|
|
} |
|
|
|
sort.Strings(sortedJobs) |
|
|
|
|
|
|
|
for _, jobID := range sortedJobs { |
|
|
|
jobResults := results[jobID] |
|
|
|
jobHasSuccess := false |
|
|
|
jobHasFailure := false |
|
|
|
for _, result := range jobResults { |
|
|
|
if result.Success { |
|
|
|
jobHasSuccess = true |
|
|
|
} else { |
|
|
|
jobHasFailure = true |
|
|
|
} |
|
|
|
} |
|
|
|
jobClass := "neutral" |
|
|
|
if jobHasSuccess && !jobHasFailure { |
|
|
|
jobClass = "success" |
|
|
|
} else if jobHasFailure { |
|
|
|
jobClass = "error" |
|
|
|
} |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<div class=\"job-result %s\">", jobClass)) |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<div class=\"job-id\">Job ID: %s</div>", jobID)) |
|
|
|
if len(jobResults) > 0 { |
|
|
|
resultHTML.WriteString("<div class=\"file-results\">") |
|
|
|
sort.Slice(jobResults, func(i, j int) bool { |
|
|
|
return jobResults[i].DocName < jobResults[j].DocName |
|
|
|
}) |
|
|
|
for _, result := range jobResults { |
|
|
|
fileClass := "success" |
|
|
|
icon := "✓" |
|
|
|
message := "Successfully uploaded" |
|
|
|
if !result.Success { |
|
|
|
fileClass = "error" |
|
|
|
icon = "✗" |
|
|
|
message = strings.ReplaceAll(result.Error, "<", "<") |
|
|
|
message = strings.ReplaceAll(message, ">", ">") |
|
|
|
} |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<div class=\"file-result %s\">", fileClass)) |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<span class=\"status-icon\">%s</span>", icon)) |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<span class=\"file-name\">%s:</span>", result.DocName)) |
|
|
|
resultHTML.WriteString(fmt.Sprintf("<span class=\"file-message\">%s</span>", message)) |
|
|
|
resultHTML.WriteString("</div>") |
|
|
|
} |
|
|
|
resultHTML.WriteString("</div>") |
|
|
|
// Return first page of results
|
|
|
|
renderUploadResultsPage(w, sessionID, 1, 20) // Default to page 1, 20 items per page
|
|
|
|
} |
|
|
|
|
|
|
|
// UploadResultsHandler handles pagination for upload results
|
|
|
|
func UploadResultsHandler(w http.ResponseWriter, r *http.Request) { |
|
|
|
sessionID := r.URL.Query().Get("session_id") |
|
|
|
if sessionID == "" { |
|
|
|
http.Error(w, "Session ID required", http.StatusBadRequest) |
|
|
|
return |
|
|
|
} |
|
|
|
|
|
|
|
page, _ := strconv.Atoi(r.URL.Query().Get("page")) |
|
|
|
if page < 1 { |
|
|
|
page = 1 |
|
|
|
} |
|
|
|
|
|
|
|
limit, _ := strconv.Atoi(r.URL.Query().Get("limit")) |
|
|
|
if limit < 1 { |
|
|
|
limit = 20 |
|
|
|
} |
|
|
|
|
|
|
|
renderUploadResultsPage(w, sessionID, page, limit) |
|
|
|
} |
|
|
|
|
|
|
|
// renderUploadResultsPage renders a paginated page of upload results
|
|
|
|
func renderUploadResultsPage(w http.ResponseWriter, sessionID string, page, limit int) { |
|
|
|
uploadSession, exists := uploadSessions[sessionID] |
|
|
|
if !exists { |
|
|
|
http.Error(w, "Upload session not found", http.StatusNotFound) |
|
|
|
return |
|
|
|
} |
|
|
|
|
|
|
|
totalResults := len(uploadSession.Results) |
|
|
|
totalPages := (totalResults + limit - 1) / limit // Ceiling division
|
|
|
|
|
|
|
|
if page > totalPages && totalPages > 0 { |
|
|
|
page = totalPages |
|
|
|
} |
|
|
|
|
|
|
|
startIndex := (page - 1) * limit |
|
|
|
endIndex := startIndex + limit |
|
|
|
if endIndex > totalResults { |
|
|
|
endIndex = totalResults |
|
|
|
} |
|
|
|
|
|
|
|
// Get results for this page
|
|
|
|
var pageResults []UploadResult |
|
|
|
if startIndex < totalResults { |
|
|
|
pageResults = uploadSession.Results[startIndex:endIndex] |
|
|
|
} |
|
|
|
|
|
|
|
// Calculate pagination info
|
|
|
|
startPage := 1 |
|
|
|
endPage := totalPages |
|
|
|
if totalPages > 10 { |
|
|
|
if page <= 5 { |
|
|
|
endPage = 10 |
|
|
|
} else if page >= totalPages-4 { |
|
|
|
startPage = totalPages - 9 |
|
|
|
} else { |
|
|
|
resultHTML.WriteString("<p>No file upload results for this job.</p>") |
|
|
|
startPage = page - 4 |
|
|
|
endPage = page + 5 |
|
|
|
} |
|
|
|
resultHTML.WriteString("</div>") |
|
|
|
} |
|
|
|
resultHTML.WriteString("</div>") |
|
|
|
w.Header().Set("Content-Type", "text/html") |
|
|
|
w.Write(resultHTML.Bytes()) |
|
|
|
|
|
|
|
data := map[string]interface{}{ |
|
|
|
"Results": pageResults, |
|
|
|
"TotalJobs": uploadSession.TotalJobs, |
|
|
|
"TotalSuccess": uploadSession.TotalSuccess, |
|
|
|
"TotalFailure": uploadSession.TotalFailure, |
|
|
|
"TotalBytesUploaded": uploadSession.TotalBytesUploaded, |
|
|
|
"TotalTime": uploadSession.TotalTime, |
|
|
|
"TotalResults": totalResults, |
|
|
|
"TotalPages": totalPages, |
|
|
|
"CurrentPage": page, |
|
|
|
"Limit": limit, |
|
|
|
"StartIndex": startIndex + 1, |
|
|
|
"EndIndex": endIndex, |
|
|
|
"StartPage": startPage, |
|
|
|
"EndPage": endPage, |
|
|
|
"SessionID": sessionID, |
|
|
|
} |
|
|
|
|
|
|
|
tmpl := root.WebTemplates |
|
|
|
if err := tmpl.ExecuteTemplate(w, "upload_results_pagination", data); err != nil { |
|
|
|
log.Printf("Template execution error: %v", err) |
|
|
|
// Don't call http.Error here as the response may have already started
|
|
|
|
// Just log the error and return
|
|
|
|
return |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// readCloserWithSize is a custom io.Reader that counts the bytes read
|
|
|
|
|