package web
import (
	"bytes"
	"encoding/csv"
	"encoding/json"
	"fmt"
	"html"
	"io"
	"log"
	"math"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	root "marmic/servicetrade-toolbox"
	"marmic/servicetrade-toolbox/internal/api"
	"marmic/servicetrade-toolbox/internal/middleware"
	"marmic/servicetrade-toolbox/internal/utils"
)
// UploadResult represents the result of a single file upload
type UploadResult struct {
JobID string
DocName string
Success bool
Error string
Data map[string]interface{}
FileSize int64
}
// UploadSession stores upload results for pagination
type UploadSession struct {
Results []UploadResult
GroupedResults []struct {
JobID string
FilesFound int
FilesUploaded int
Success bool
ErrorMsg string
Files []struct {
Name string
Success bool
Error string
FileSize int64
}
}
TotalJobs int
TotalSuccess int
TotalFailure int
TotalBytesUploaded int64
TotalTime time.Duration
CreatedAt time.Time
}
// uploadSessions stores upload results keyed by session ID so results can be
// paginated after the initial upload completes. HTTP handlers run
// concurrently, so access is guarded by uploadSessionsMu. (In production,
// use Redis or a database instead of an in-process map.)
var (
	uploadSessions   = make(map[string]UploadSession)
	uploadSessionsMu sync.RWMutex
)
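
// cleanupUploadSessions removes sessions older than maxAge. This is an
// illustrative sketch only: nothing in this file schedules it, and the cutoff
// is an assumed value rather than a project requirement. A caller could run
// it periodically, e.g. from a time.Ticker loop.
func cleanupUploadSessions(maxAge time.Duration) {
	uploadSessionsMu.Lock()
	defer uploadSessionsMu.Unlock()
	for id, s := range uploadSessions {
		if time.Since(s.CreatedAt) > maxAge {
			delete(uploadSessions, id)
		}
	}
}
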
// DocumentsHandler handles the document upload page
func DocumentsHandler(w http.ResponseWriter, r *http.Request) {
session, ok := r.Context().Value(middleware.SessionKey).(*api.Session)
if !ok {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
tmpl := root.WebTemplates
data := map[string]interface{}{
"Title": "Document Uploads",
"Session": session,
}
if r.Header.Get("HX-Request") == "true" {
// For HTMX requests, just send the document_upload partial
if err := tmpl.ExecuteTemplate(w, "document_upload", data); err != nil {
log.Printf("Template execution error: %v", err)
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
return
}
} else {
// For full page requests, first render document_upload into a buffer
var contentBuf bytes.Buffer
if err := tmpl.ExecuteTemplate(&contentBuf, "document_upload", data); err != nil {
log.Printf("Template execution error: %v", err)
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
return
}
// Add the rendered content to the data for the layout
data["BodyContent"] = contentBuf.String()
// Now render the layout with our content
if err := tmpl.ExecuteTemplate(w, "layout.html", data); err != nil {
log.Printf("Template execution error: %v", err)
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
return
}
}
}
// ProcessCSVHandler processes a CSV file with job numbers
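// It looks for a header column named "id" (case-insensitive) and falls back
// to the first column. For example, a CSV like the following (illustrative
// data, not real job IDs) would yield jobs 1001 and 1002:
//
//	id,location,description
//	1001,Site A,Quarterly inspection
//	1002,Site B,Sprinkler repair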
func ProcessCSVHandler(w http.ResponseWriter, r *http.Request) {
_, ok := r.Context().Value(middleware.SessionKey).(*api.Session)
if !ok {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
// Check if the request method is POST
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Parse the multipart form data with a 10MB limit
if err := r.ParseMultipartForm(10 << 20); err != nil {
http.Error(w, "Unable to parse form: "+err.Error(), http.StatusBadRequest)
return
}
// Get the file from the form
file, _, err := r.FormFile("csvFile")
if err != nil {
http.Error(w, "Error retrieving file: "+err.Error(), http.StatusBadRequest)
return
}
defer file.Close()
// Read the CSV data
csvData, err := csv.NewReader(file).ReadAll()
if err != nil {
http.Error(w, "Error reading CSV file: "+err.Error(), http.StatusBadRequest)
return
}
if len(csvData) < 2 {
http.Error(w, "CSV file must contain at least a header row and one data row", http.StatusBadRequest)
return
}
// Find the index of the 'id' column
headerRow := csvData[0]
idColumnIndex := -1
for i, header := range headerRow {
if strings.ToLower(strings.TrimSpace(header)) == "id" {
idColumnIndex = i
break
}
}
// If 'id' column not found, try the first column
if idColumnIndex == -1 {
idColumnIndex = 0
log.Printf("No 'id' column found in CSV, using first column (header: %s)", headerRow[0])
} else {
log.Printf("Found 'id' column at index %d", idColumnIndex)
}
// Extract job numbers from the CSV
var jobNumbers []string
for rowIndex, row := range csvData {
// Skip header row
if rowIndex == 0 {
continue
}
if len(row) > idColumnIndex {
// Extract and clean up the job ID
jobID := strings.TrimSpace(row[idColumnIndex])
if jobID != "" {
jobNumbers = append(jobNumbers, jobID)
}
}
}
totalJobs := len(jobNumbers)
log.Printf("Extracted %d job IDs from CSV", totalJobs)
if totalJobs == 0 {
http.Error(w, "No valid job IDs found in the CSV file", http.StatusBadRequest)
return
}
// Create a hidden input with the job IDs
jobsValue := strings.Join(jobNumbers, ",")
// Generate HTML for the main response (hidden input for job-ids-container)
var responseHTML bytes.Buffer
	// Escape the joined IDs so arbitrary CSV content cannot break out of the attribute.
	responseHTML.WriteString(fmt.Sprintf(`<input type="hidden" name="jobNumbers" value="%s">`, html.EscapeString(jobsValue)))
responseHTML.WriteString(fmt.Sprintf(`<p>Found <strong>%d</strong> job(s) in the CSV file</p>`, totalJobs))
// Generate out-of-band swap for the preview section - simplified version
responseHTML.WriteString(fmt.Sprintf(`
<div id="csv-preview" class="fade-me-out csv-preview-active" hx-swap-oob="true">
<h4>✓ Jobs Detected</h4>
<p>Upload to <strong>%d</strong> job(s)</p>
</div>
`, totalJobs))
w.Header().Set("Content-Type", "text/html")
w.Write(responseHTML.Bytes())
}
// UploadDocumentsHandler handles document uploads to jobs
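// The handler expects a multipart form with the fields read below. The curl
// invocation and route path here are illustrative only, not a documented API:
//
//	curl -X POST https://example.local/upload-documents \
//	  -F "jobNumbers=1001,1002" \
//	  -F "file_display_names=[\"Report.pdf\"]" \
//	  -F "file_document_types=[\"1\"]" \
//	  -F "file_is_active_flags=[true]" \
//	  -F "documentFiles=@Report.pdf"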
func UploadDocumentsHandler(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
session, ok := r.Context().Value(middleware.SessionKey).(*api.Session)
if !ok {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
log.Printf("Starting document upload handler with Content-Length: %.2f MB",
float64(r.ContentLength)/(1024*1024))
maxMemory := int64(32 << 20) // 32MB in memory, rest to disk
if err := r.ParseMultipartForm(maxMemory); err != nil {
log.Printf("Error parsing multipart form: %v", err)
http.Error(w, "Unable to parse form: "+err.Error(), http.StatusBadRequest)
return
}
defer r.MultipartForm.RemoveAll() // Clean up temporary files
jobNumbers := r.FormValue("jobNumbers")
if jobNumbers == "" {
log.Printf("No job numbers found in hidden 'jobNumbers' input.")
http.Error(w, "No job numbers provided", http.StatusBadRequest)
return
}
log.Printf("Job numbers: %s", jobNumbers)
	var jobs []string
	for _, j := range strings.Split(jobNumbers, ",") { // drop blank entries (e.g. trailing commas)
		if j = strings.TrimSpace(j); j != "" {
			jobs = append(jobs, j)
		}
	}
	if len(jobs) == 0 {
		http.Error(w, "No valid job numbers provided", http.StatusBadRequest)
		return
	}
// Read and parse file metadata from form values
displayNamesJSON := r.FormValue("file_display_names")
documentTypesJSON := r.FormValue("file_document_types")
isActiveFlagsJSON := r.FormValue("file_is_active_flags")
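	// Illustrative payloads for these three fields (parallel arrays, one
	// entry per uploaded file; the values here are examples only):
	//
	//	file_display_names:   ["Inspection Report.pdf", "Photos.zip"]
	//	file_document_types:  ["1", "3"]
	//	file_is_active_flags: [true, false]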
var displayNames []string
var documentTypes []string
var isActiveFlags []bool
if err := json.Unmarshal([]byte(displayNamesJSON), &displayNames); err != nil {
log.Printf("Error unmarshalling displayNames: %v. JSON: %s", err, displayNamesJSON)
http.Error(w, "Error processing file information (display names). Please try again.", http.StatusBadRequest)
return
}
if err := json.Unmarshal([]byte(documentTypesJSON), &documentTypes); err != nil {
log.Printf("Error unmarshalling documentTypes: %v. JSON: %s", err, documentTypesJSON)
http.Error(w, "Error processing file information (document types). Please try again.", http.StatusBadRequest)
return
}
if err := json.Unmarshal([]byte(isActiveFlagsJSON), &isActiveFlags); err != nil {
log.Printf("Error unmarshalling isActiveFlags: %v. JSON: %s", err, isActiveFlagsJSON)
http.Error(w, "Error processing file information (active flags). Please try again.", http.StatusBadRequest)
return
}
fileHeaders := r.MultipartForm.File["documentFiles"]
	if len(fileHeaders) == 0 {
		// This can happen when the client submits with every file marked
		// inactive, or with no files selected at all. Report that nothing
		// was processed rather than treating it as a silent success.
		log.Println("No document files received in the request.")
		w.Write([]byte("<p>No files were processed. If you selected files, please ensure some are active for upload.</p>"))
		return
	}
// Validate that the lengths of metadata arrays match the number of files
if len(fileHeaders) != len(displayNames) || len(fileHeaders) != len(documentTypes) || len(fileHeaders) != len(isActiveFlags) {
log.Printf("Metadata array length mismatch. Files: %d, Names: %d, Types: %d, ActiveFlags: %d",
len(fileHeaders), len(displayNames), len(documentTypes), len(isActiveFlags))
http.Error(w, "Mismatch in file metadata. Please clear selection and try uploading files again.", http.StatusBadRequest)
return
}
type FileToUploadMetadata struct {
OriginalFilename string
DisplayName string
Type string
Content []byte // Store file content in memory
}
var filesToUpload []FileToUploadMetadata
for i, fileHeader := range fileHeaders {
if !isActiveFlags[i] {
log.Printf("Skipping file %s (original index %d) as it's marked inactive.", fileHeader.Filename, i)
continue // Skip inactive files
}
uploadedFile, err := fileHeader.Open()
if err != nil {
log.Printf("Error opening uploaded file %s (original index %d): %v. Skipping.", fileHeader.Filename, i, err)
// No need to close uploadedFile here as it wasn't successfully opened.
continue
}
displayName := displayNames[i]
docType := documentTypes[i]
if strings.TrimSpace(displayName) == "" {
displayName = fileHeader.Filename // Fallback to original filename
log.Printf("Warning: Empty display name for file %s (original index %d), using original filename.", fileHeader.Filename, i)
}
		// Fall back to a default document type when the client sent an empty one.
if strings.TrimSpace(docType) == "" {
docType = "1" // Fallback to a default type
log.Printf("Warning: Empty document type for file %s (original index %d, display name %s), using default type '1'.", fileHeader.Filename, i, displayName)
}
// Read file content into memory
fileBytes, err := io.ReadAll(uploadedFile)
if err != nil {
log.Printf("Error reading content of uploaded file %s (original index %d, display name %s): %v. Skipping.", fileHeader.Filename, i, displayName, err)
uploadedFile.Close()
continue
}
uploadedFile.Close() // Close the multipart file handle after reading its content
metadata := FileToUploadMetadata{
OriginalFilename: fileHeader.Filename,
DisplayName: displayName,
Type: docType,
Content: fileBytes,
}
filesToUpload = append(filesToUpload, metadata)
}
activeFilesProcessedCount := len(filesToUpload)
if activeFilesProcessedCount == 0 {
log.Println("No active files to upload after filtering.")
// Send a user-friendly message back. The resultHTML later will also reflect this.
w.Write([]byte("<p>No active files were selected for upload. Please ensure files are selected and not marked as removed.</p>"))
return
}
log.Printf("Total active files prepared for upload: %d", activeFilesProcessedCount)
const maxConcurrent = 5
const requestDelay = 300 * time.Millisecond
totalUploads := len(jobs) * activeFilesProcessedCount
resultsChan := make(chan UploadResult, totalUploads)
var wg sync.WaitGroup
semaphore := make(chan struct{}, maxConcurrent)
log.Printf("Starting up to %d concurrent upload workers for %d total uploads (%d jobs x %d active files)",
maxConcurrent, totalUploads, len(jobs), activeFilesProcessedCount)
for _, jobID := range jobs {
for _, metadataToUpload := range filesToUpload {
currentUploadMetadata := metadataToUpload
wg.Add(1)
go func(jobID string, meta FileToUploadMetadata) {
defer wg.Done()
semaphore <- struct{}{}
defer func() {
<-semaphore
// No temp file to remove here as content is in memory
}()
time.Sleep(requestDelay)
fileNameForUpload := meta.DisplayName
fileReaderForUpload := io.NopCloser(bytes.NewReader(meta.Content))
expectedSize := int64(len(meta.Content))
			// meta.Content is assumed valid at this point; a nil or truncated
			// buffer will surface as an API upload failure below.
if len(jobs) > 10 {
jitter := time.Duration(100+(time.Now().UnixNano()%400)) * time.Millisecond
time.Sleep(jitter)
}
sizeTracker := &readCloserWithSize{reader: fileReaderForUpload, size: 0}
log.Printf("Goroutine Info: Starting to stream in-memory data (original: %s, uploading as %s, type: %s, size: %.2f MB) to job %s",
meta.OriginalFilename, fileNameForUpload, meta.Type, float64(expectedSize)/(1024*1024), jobID)
// Define uploadStart here for per-goroutine timing
uploadStartGoroutine := time.Now()
uploadResultData, errUpload := session.UploadAttachmentFile(jobID, fileNameForUpload, meta.Type, sizeTracker)
uploadDuration := time.Since(uploadStartGoroutine)
fileSizeUploaded := sizeTracker.Size()
sizeMatch := true
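			// Heuristic: flag the upload as suspect if the bytes actually read
			// by the API client differ from the buffered size by more than 5%.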
if expectedSize > 0 && math.Abs(float64(expectedSize-fileSizeUploaded)) > float64(expectedSize)*0.05 {
sizeMatch = false
log.Printf("Goroutine WARNING: Size mismatch for in-memory data (original: %s, uploaded as %s) to job %s. Expected: %d, Uploaded: %d",
meta.OriginalFilename, fileNameForUpload, jobID, expectedSize, fileSizeUploaded)
}
if errUpload != nil {
log.Printf("Goroutine Error: Uploading in-memory data (original: %s, as %s) to job %s failed after %v: %v",
meta.OriginalFilename, fileNameForUpload, jobID, uploadDuration, errUpload)
resultsChan <- UploadResult{
JobID: jobID,
DocName: fileNameForUpload,
Success: false,
Error: errUpload.Error(),
FileSize: fileSizeUploaded,
}
} else if !sizeMatch {
log.Printf("Goroutine Error: Upload of in-memory data (original: %s, as %s) to job %s appears corrupted. API reported success but file sizes mismatch.",
meta.OriginalFilename, fileNameForUpload, jobID)
resultsChan <- UploadResult{
JobID: jobID,
DocName: fileNameForUpload,
Success: false,
Error: "Upload appears corrupted (file size mismatch)",
FileSize: fileSizeUploaded,
}
} else {
log.Printf("Goroutine Success: Uploaded in-memory data (original: %s, %.2f MB, as %s, type: %s) to job %s in %v",
meta.OriginalFilename, float64(fileSizeUploaded)/(1024*1024), fileNameForUpload, meta.Type, jobID, uploadDuration)
resultsChan <- UploadResult{
JobID: jobID,
DocName: fileNameForUpload,
Success: true,
Data: uploadResultData,
FileSize: fileSizeUploaded,
}
}
}(jobID, currentUploadMetadata)
}
}
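	// Close the results channel once every worker has finished, so the
	// collection loop below terminates.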
go func() {
wg.Wait()
close(resultsChan)
log.Println("All upload goroutines finished.")
}()
// Collect all results
var allResults []UploadResult
var totalBytesUploaded int64
for result := range resultsChan {
log.Printf("Received result: Job %s, File %s, Success: %v, Size: %.2f MB",
result.JobID, result.DocName, result.Success, float64(result.FileSize)/(1024*1024))
if result.Success {
totalBytesUploaded += result.FileSize
}
allResults = append(allResults, result)
}
// Calculate total duration
totalDuration := time.Since(startTime)
log.Printf("All results collected. Total: %d, Total bytes uploaded: %.2f MB, Total time: %v",
len(allResults), float64(totalBytesUploaded)/(1024*1024), totalDuration)
// Group results by job for consistent display with removal results
type JobUploadResult struct {
JobID string
FilesFound int
FilesUploaded int
Success bool
ErrorMsg string
Files []struct {
Name string
Success bool
Error string
FileSize int64
}
}
// Group results by job
jobResults := make(map[string]*JobUploadResult)
for _, result := range allResults {
if jobResult, exists := jobResults[result.JobID]; exists {
// Add file to existing job
jobResult.FilesFound++
if result.Success {
jobResult.FilesUploaded++
} else {
jobResult.Success = false
if jobResult.ErrorMsg == "" {
jobResult.ErrorMsg = "Some files failed to upload"
}
}
jobResult.Files = append(jobResult.Files, struct {
Name string
Success bool
Error string
FileSize int64
}{
Name: result.DocName,
Success: result.Success,
Error: result.Error,
FileSize: result.FileSize,
})
} else {
// Create new job result
jobResults[result.JobID] = &JobUploadResult{
JobID: result.JobID,
FilesFound: 1,
FilesUploaded: 0,
Success: result.Success,
ErrorMsg: "",
Files: []struct {
Name string
Success bool
Error string
FileSize int64
}{
{
Name: result.DocName,
Success: result.Success,
Error: result.Error,
FileSize: result.FileSize,
},
},
}
if result.Success {
jobResults[result.JobID].FilesUploaded = 1
} else {
jobResults[result.JobID].ErrorMsg = "Some files failed to upload"
}
}
}
// Convert grouped results to slice
var groupedResults []struct {
JobID string
FilesFound int
FilesUploaded int
Success bool
ErrorMsg string
Files []struct {
Name string
Success bool
Error string
FileSize int64
}
}
for _, jobResult := range jobResults {
groupedResults = append(groupedResults, *jobResult)
}
// Store results in session for pagination
sessionID := fmt.Sprintf("upload_%d", time.Now().UnixNano())
uploadSession := UploadSession{
Results: allResults, // Keep original results for backward compatibility
GroupedResults: groupedResults,
TotalJobs: len(jobs),
TotalSuccess: 0,
TotalFailure: 0,
TotalBytesUploaded: totalBytesUploaded,
TotalTime: totalDuration,
CreatedAt: time.Now(),
}
// Calculate totals
for _, result := range allResults {
if result.Success {
uploadSession.TotalSuccess++
} else {
uploadSession.TotalFailure++
}
}
	// Store in the global map under a write lock (in production, use Redis or a database)
	uploadSessionsMu.Lock()
	uploadSessions[sessionID] = uploadSession
	uploadSessionsMu.Unlock()
// Get configurable page size from form, with fallback to default
limitStr := r.FormValue("limit")
limit := utils.DefaultPageSize
if limitStr != "" {
if parsedLimit, err := strconv.Atoi(limitStr); err == nil && parsedLimit > 0 {
if parsedLimit > utils.MaxPageSize {
limit = utils.MaxPageSize
} else {
limit = parsedLimit
}
}
}
// Return first page of results with configurable page size
renderUploadResultsPage(w, sessionID, utils.DefaultPage, limit, "all")
}
// UploadResultsHandler handles pagination for upload results
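// Example query (route path and session ID are illustrative, not defined here):
//
//	GET /upload-results?session_id=upload_1712345678&page=2&limit=10&filter=failed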
func UploadResultsHandler(w http.ResponseWriter, r *http.Request) {
sessionID := r.URL.Query().Get("session_id")
if sessionID == "" {
http.Error(w, "Session ID required", http.StatusBadRequest)
return
}
page, _ := strconv.Atoi(r.URL.Query().Get("page"))
if page < 1 {
page = utils.DefaultPage
}
limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
if limit < 1 {
limit = utils.DefaultPageSize
}
// Optional filter: all|success|failed
filter := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("filter")))
if filter != "success" && filter != "failed" {
filter = "all"
}
renderUploadResultsPage(w, sessionID, page, limit, filter)
}
// renderUploadResultsPage renders a paginated page of upload results
func renderUploadResultsPage(w http.ResponseWriter, sessionID string, page, limit int, filter string) {
	uploadSessionsMu.RLock()
	uploadSession, exists := uploadSessions[sessionID]
	uploadSessionsMu.RUnlock()
	if !exists {
		http.Error(w, "Upload session not found", http.StatusNotFound)
		return
	}
// Build a filtered view per job at the file level
var filteredJobs []struct {
JobID string
FilesFound int
FilesUploaded int
Success bool
ErrorMsg string
Files []struct {
Name string
Success bool
Error string
FileSize int64
}
}
for _, jr := range uploadSession.GroupedResults {
// Filter files per job
var files []struct {
Name string
Success bool
Error string
FileSize int64
}
for _, f := range jr.Files {
if filter == "success" && !f.Success {
continue
}
if filter == "failed" && f.Success {
continue
}
files = append(files, f)
}
		// Skip jobs that have no files matching the filter
		if len(files) == 0 && filter != "all" {
			continue
		}
		// Copy the job with only the filtered files attached
		jobCopy := struct {
			JobID         string
			FilesFound    int
			FilesUploaded int
			Success       bool
			ErrorMsg      string
			Files         []struct {
				Name     string
				Success  bool
				Error    string
				FileSize int64
			}
		}{
			JobID:         jr.JobID,
			FilesFound:    jr.FilesFound,
			FilesUploaded: jr.FilesUploaded,
			Success:       jr.Success,
			ErrorMsg:      jr.ErrorMsg,
			Files:         files,
		}
		filteredJobs = append(filteredJobs, jobCopy)
}
totalResults := len(filteredJobs)
pagination := utils.CalculatePagination(totalResults, page, limit)
// Get results for this page
startIndex := (pagination.CurrentPage - 1) * pagination.Limit
endIndex := startIndex + pagination.Limit
if endIndex > totalResults {
endIndex = totalResults
}
pageResults := utils.GetPageResults(filteredJobs, startIndex, endIndex)
// Add pagination info to each job result for the template
var resultsWithPagination []map[string]interface{}
for _, jobResult := range pageResults {
filesLen := len(jobResult.Files)
displaySuccess := (filter == "success") || (filter != "failed" && jobResult.Success)
resultMap := map[string]interface{}{
"JobID": jobResult.JobID,
"FilesFound": jobResult.FilesFound,
"FilesUploaded": jobResult.FilesUploaded,
"Success": jobResult.Success,
"ErrorMsg": jobResult.ErrorMsg,
"Files": jobResult.Files,
"FilePage": 1, // Default to first file
"TotalFiles": filesLen,
"SessionID": sessionID,
"Filter": filter,
"DisplaySuccess": displaySuccess,
}
resultsWithPagination = append(resultsWithPagination, resultMap)
}
data := map[string]interface{}{
"Results": resultsWithPagination,
"TotalJobs": uploadSession.TotalJobs,
"TotalSuccess": uploadSession.TotalSuccess,
"TotalFailure": uploadSession.TotalFailure,
"TotalBytesUploaded": uploadSession.TotalBytesUploaded,
"TotalTime": uploadSession.TotalTime,
"TotalResults": pagination.TotalResults,
"TotalPages": pagination.TotalPages,
"CurrentPage": pagination.CurrentPage,
"Limit": pagination.Limit,
"StartIndex": pagination.StartIndex,
"EndIndex": pagination.EndIndex,
"StartPage": pagination.StartPage,
"EndPage": pagination.EndPage,
"SessionID": sessionID,
"Filter": filter,
}
tmpl := root.WebTemplates
if err := tmpl.ExecuteTemplate(w, "upload_results_pagination", data); err != nil {
log.Printf("Template execution error: %v", err)
return
}
}
// readCloserWithSize wraps an io.ReadCloser and counts the bytes read through it
type readCloserWithSize struct {
reader io.ReadCloser
size int64
}
func (r *readCloserWithSize) Read(p []byte) (n int, err error) {
n, err = r.reader.Read(p)
r.size += int64(n)
return n, err
}
func (r *readCloserWithSize) Close() error {
if r.reader != nil {
return r.reader.Close()
}
return nil // Allow closing nil reader safely
}
func (r *readCloserWithSize) Size() int64 {
return r.size
}
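
// exampleTrackReadSize is an illustrative sketch (not called anywhere in this
// package) of how readCloserWithSize is used: wrap any reader to learn how
// many bytes a consumer actually read from it, as the upload goroutines above do.
func exampleTrackReadSize() int64 {
	src := io.NopCloser(strings.NewReader("sample payload"))
	tracker := &readCloserWithSize{reader: src}
	_, _ = io.Copy(io.Discard, tracker) // consume everything
	_ = tracker.Close()
	return tracker.Size() // 14 bytes for this sample input
}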

// DocumentFieldAddHandler and DocumentFieldRemoveHandler have been removed;
// they are no longer needed with the multi-file input and the new chip UI.

// UploadJobFileHandler serves a single job card containing exactly one file,
// implementing per-job file pagination.
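// Example query (route path and session ID are illustrative):
//
//	GET /upload-job-file?session_id=upload_1712345678&job_id=1001&file_page=2&filter=all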
func UploadJobFileHandler(w http.ResponseWriter, r *http.Request) {
sessionID := r.URL.Query().Get("session_id")
jobID := r.URL.Query().Get("job_id")
filePageStr := r.URL.Query().Get("file_page")
filePage := 1
if filePageStr != "" {
if parsed, err := strconv.Atoi(filePageStr); err == nil && parsed > 0 {
filePage = parsed
}
}
filter := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("filter")))
if filter != "success" && filter != "failed" {
filter = "all"
}
	uploadSessionsMu.RLock()
	uploadSession, exists := uploadSessions[sessionID]
	uploadSessionsMu.RUnlock()
	if !exists {
		http.Error(w, "Upload session not found", http.StatusNotFound)
		return
	}
// Find the job result
var jobResult *struct {
JobID string
FilesFound int
FilesUploaded int
Success bool
ErrorMsg string
Files []struct {
Name string
Success bool
Error string
FileSize int64
}
}
for i := range uploadSession.GroupedResults {
if uploadSession.GroupedResults[i].JobID == jobID {
jobResult = &uploadSession.GroupedResults[i]
break
}
}
if jobResult == nil {
http.Error(w, "Job not found in session", http.StatusNotFound)
return
}
// Apply file-level filter
var filteredFiles []struct {
Name string
Success bool
Error string
FileSize int64
}
for _, f := range jobResult.Files {
if filter == "success" && !f.Success {
continue
}
if filter == "failed" && f.Success {
continue
}
filteredFiles = append(filteredFiles, f)
}
totalFiles := len(filteredFiles)
if totalFiles == 0 {
data := map[string]interface{}{
"JobID": jobResult.JobID,
"FilesFound": jobResult.FilesFound,
"FilesUploaded": jobResult.FilesUploaded,
"Success": jobResult.Success,
"ErrorMsg": jobResult.ErrorMsg,
"Files": nil,
"FilePage": 1,
"TotalFiles": 0,
"SessionID": sessionID,
"Filter": filter,
"DisplaySuccess": (filter == "success") || (filter != "failed" && jobResult.Success),
}
tmpl := root.WebTemplates
if err := tmpl.ExecuteTemplate(w, "upload_result_card", data); err != nil {
log.Printf("Template execution error: %v", err)
return
}
return
}
// Ensure filePage is within bounds
if filePage > totalFiles {
filePage = totalFiles
}
if filePage < 1 {
filePage = 1
}
// Create a copy of the job result with only the requested file
jobResultCopy := struct {
JobID string
FilesFound int
FilesUploaded int
Success bool
ErrorMsg string
Files []struct {
Name string
Success bool
Error string
FileSize int64
}
}{
JobID: jobResult.JobID,
FilesFound: jobResult.FilesFound,
FilesUploaded: jobResult.FilesUploaded,
Success: jobResult.Success,
ErrorMsg: jobResult.ErrorMsg,
Files: []struct {
Name string
Success bool
Error string
FileSize int64
}{filteredFiles[filePage-1]},
}
// Add pagination info for the template
data := map[string]interface{}{
"JobID": jobResultCopy.JobID,
"FilesFound": jobResultCopy.FilesFound,
"FilesUploaded": jobResultCopy.FilesUploaded,
"Success": jobResultCopy.Success,
"ErrorMsg": jobResultCopy.ErrorMsg,
"Files": jobResultCopy.Files,
"FilePage": filePage,
"TotalFiles": totalFiles,
"SessionID": sessionID,
"Filter": filter,
"DisplaySuccess": (filter == "success") || (filter != "failed" && jobResult.Success),
}
tmpl := root.WebTemplates
if err := tmpl.ExecuteTemplate(w, "upload_result_card", data); err != nil {
log.Printf("Template execution error: %v", err)
return
}
}