// Package web contains the HTTP handlers for the document-upload pages
// of the ServiceTrade toolbox web UI.
package web

import (
	"bytes"
	"encoding/csv"
	"fmt"
	"io"
	"log"
	"math"
	"net/http"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"

	root "marmic/servicetrade-toolbox"
	"marmic/servicetrade-toolbox/internal/api"
	"marmic/servicetrade-toolbox/internal/middleware"
)

// DocumentsHandler handles the document upload page.
//
// It requires an authenticated *api.Session in the request context (placed
// there by the middleware). For HTMX requests (HX-Request header) it renders
// only the "document_upload" partial; for full page loads it renders the
// partial into a buffer and embeds it in "layout.html" as BodyContent.
func DocumentsHandler(w http.ResponseWriter, r *http.Request) {
	session, ok := r.Context().Value(middleware.SessionKey).(*api.Session)
	if !ok {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}

	tmpl := root.WebTemplates
	data := map[string]interface{}{
		"Title":   "Document Uploads",
		"Session": session,
	}

	if r.Header.Get("HX-Request") == "true" {
		// For HTMX requests, just send the document_upload partial
		if err := tmpl.ExecuteTemplate(w, "document_upload", data); err != nil {
			log.Printf("Template execution error: %v", err)
			http.Error(w, "Internal Server Error", http.StatusInternalServerError)
			return
		}
	} else {
		// For full page requests, first render document_upload into a buffer
		var contentBuf bytes.Buffer
		if err := tmpl.ExecuteTemplate(&contentBuf, "document_upload", data); err != nil {
			log.Printf("Template execution error: %v", err)
			http.Error(w, "Internal Server Error", http.StatusInternalServerError)
			return
		}
		// Add the rendered content to the data for the layout
		data["BodyContent"] = contentBuf.String()
		// Now render the layout with our content
		if err := tmpl.ExecuteTemplate(w, "layout.html", data); err != nil {
			log.Printf("Template execution error: %v", err)
			http.Error(w, "Internal Server Error", http.StatusInternalServerError)
			return
		}
	}
}

// ProcessCSVHandler processes a CSV file with job numbers.
//
// It expects a POST multipart form with a "csvFile" field. The CSV must have
// a header row plus at least one data row; job IDs are read from the "id"
// column (case-insensitive), falling back to the first column when no "id"
// header is found. The response is an HTMX HTML fragment: a hidden input
// carrying the comma-joined job IDs plus an out-of-band preview swap.
func ProcessCSVHandler(w http.ResponseWriter, r *http.Request) {
	// Session is required but not otherwise used here — this only gates access.
	_, ok := r.Context().Value(middleware.SessionKey).(*api.Session)
	if !ok {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}

	// Check if the request method is POST
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Parse the multipart form data with a 10MB limit
	if err := r.ParseMultipartForm(10 << 20); err != nil {
		http.Error(w, "Unable to parse form: "+err.Error(), http.StatusBadRequest)
		return
	}

	// Get the file from the form
	file, _, err := r.FormFile("csvFile")
	if err != nil {
		http.Error(w, "Error retrieving file: "+err.Error(), http.StatusBadRequest)
		return
	}
	defer file.Close()

	// Read the CSV data
	csvData, err := csv.NewReader(file).ReadAll()
	if err != nil {
		http.Error(w, "Error reading CSV file: "+err.Error(), http.StatusBadRequest)
		return
	}

	if len(csvData) < 2 {
		http.Error(w, "CSV file must contain at least a header row and one data row", http.StatusBadRequest)
		return
	}

	// Find the index of the 'id' column
	headerRow := csvData[0]
	idColumnIndex := -1
	for i, header := range headerRow {
		if strings.ToLower(strings.TrimSpace(header)) == "id" {
			idColumnIndex = i
			break
		}
	}

	// If 'id' column not found, try the first column
	if idColumnIndex == -1 {
		idColumnIndex = 0
		log.Printf("No 'id' column found in CSV, using first column (header: %s)", headerRow[0])
	} else {
		log.Printf("Found 'id' column at index %d", idColumnIndex)
	}

	// Extract job numbers from the CSV
	var jobNumbers []string
	for rowIndex, row := range csvData {
		// Skip header row
		if rowIndex == 0 {
			continue
		}
		if len(row) > idColumnIndex {
			// Extract and clean up the job ID
			jobID := strings.TrimSpace(row[idColumnIndex])
			if jobID != "" {
				jobNumbers = append(jobNumbers, jobID)
			}
		}
	}

	totalJobs := len(jobNumbers)
	log.Printf("Extracted %d job IDs from CSV", totalJobs)

	if totalJobs == 0 {
		http.Error(w, "No valid job IDs found in the CSV file", http.StatusBadRequest)
		return
	}

	// Create a hidden input with the job IDs
	jobsValue := strings.Join(jobNumbers, ",")

	// Generate HTML for the main response (hidden input for job-ids-container)
	//
	// NOTE(review): the HTML markup inside these template literals appears to
	// have been stripped/garbled in this copy of the file — the first Sprintf
	// has an EMPTY format string but is passed jobsValue (which would render
	// as "%!(EXTRA string=...)"), and the following literals contain only the
	// text content with no tags. Restore the original markup (presumably a
	// hidden <input> named "jobNumbers" and surrounding divs) from version
	// control before shipping.
	var responseHTML bytes.Buffer
	responseHTML.WriteString(fmt.Sprintf(``, jobsValue))
	responseHTML.WriteString(fmt.Sprintf(`

Found %d job(s) in the CSV file

`, totalJobs))

	// Generate out-of-band swap for the preview section - simplified version
	responseHTML.WriteString(fmt.Sprintf(`

✓ Jobs Detected

Upload to %d job(s)

`, totalJobs))

	w.Header().Set("Content-Type", "text/html")
	w.Write(responseHTML.Bytes())
}

// UploadDocumentsHandler handles document uploads to jobs.
//
// It expects a POST multipart form containing:
//   - "jobNumbers":    comma-separated job IDs (the hidden input produced by
//     ProcessCSVHandler);
//   - "documentType":  a single document type applied to every file;
//   - "documentFiles": one or more uploaded files.
//
// Each file is first staged to a local temp file, then uploaded to every job
// (jobs x files total uploads) via session.UploadAttachmentFile, with
// concurrency capped by a semaphore and a per-request delay to respect the
// ServiceTrade API's rate limits. The response is an HTML fragment
// summarizing successes and failures per job.
func UploadDocumentsHandler(w http.ResponseWriter, r *http.Request) {
	session, ok := r.Context().Value(middleware.SessionKey).(*api.Session)
	if !ok {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}

	// Check if the request method is POST
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	log.Printf("Starting document upload handler with Content-Length: %.2f MB",
		float64(r.ContentLength)/(1024*1024))

	// Parse the multipart form with a reasonable buffer size
	// Files larger than this will be saved to temporary files automatically
	maxMemory := int64(32 << 20) // 32MB in memory, rest to disk
	if err := r.ParseMultipartForm(maxMemory); err != nil {
		log.Printf("Error parsing multipart form: %v", err)
		http.Error(w, "Unable to parse form: "+err.Error(), http.StatusBadRequest)
		return
	}
	defer r.MultipartForm.RemoveAll() // Clean up temporary files

	// Get job numbers from form values
	jobNumbers := r.FormValue("jobNumbers")
	if jobNumbers == "" {
		log.Printf("No job numbers found in hidden 'jobNumbers' input.")
		http.Error(w, "No job numbers provided", http.StatusBadRequest)
		return
	}
	log.Printf("Job numbers: %s", jobNumbers)

	jobs := strings.Split(jobNumbers, ",")
	if len(jobs) == 0 {
		http.Error(w, "No valid job numbers provided", http.StatusBadRequest)
		return
	}

	// Get the single document type
	docType := r.FormValue("documentType")
	if docType == "" {
		log.Printf("No document type selected")
		http.Error(w, "Please select a document type", http.StatusBadRequest)
		return
	}
	log.Printf("Document Type selected: %s", docType)

	// Get the uploaded files from the 'documentFiles' input
	fileHeaders := r.MultipartForm.File["documentFiles"]
	if len(fileHeaders) == 0 {
		http.Error(w, "No documents selected for upload", http.StatusBadRequest)
		return
	}

	// Store file metadata
	type FileMetadata struct {
		FileName string
		Type     string
		TempFile string   // Path to temp file
		File     *os.File // Open file handle for the temp file
	}
	var filesToUpload []FileMetadata

	// Process each uploaded file: stage every upload to a local temp file so
	// the upload goroutines can each open an independent read handle later.
	for _, fileHeader := range fileHeaders {
		if fileHeader.Filename == "" {
			log.Printf("Skipping file header with empty filename.")
			continue
		}

		// Open the uploaded file
		uploadedFile, err := fileHeader.Open()
		if err != nil {
			log.Printf("Error opening uploaded file %s: %v", fileHeader.Filename, err)
			// Optionally: decide if one error should halt all uploads or just skip this file
			continue // Skip this file
		}

		// Prepare metadata
		metadata := FileMetadata{
			FileName: fileHeader.Filename,
			Type:     docType, // Use the single document type for all files
		}

		// Create a temp file for the upload (regardless of size to ensure streaming)
		tempFile, err := os.CreateTemp("", "upload-*"+filepath.Ext(fileHeader.Filename))
		if err != nil {
			log.Printf("Error creating temp file for %s: %v", fileHeader.Filename, err)
			uploadedFile.Close()
			continue // Skip this file
		}

		// Copy the file content to the temp file
		bytesCopied, err := io.Copy(tempFile, uploadedFile)
		uploadedFile.Close() // Close the original multipart file handle
		if err != nil {
			log.Printf("Error copying to temp file for %s: %v", fileHeader.Filename, err)
			tempFile.Close()           // Close the temp file handle
			os.Remove(tempFile.Name()) // Remove the partially written temp file
			continue                   // Skip this file
		}
		log.Printf("Copied %d bytes of %s to temporary file: %s",
			bytesCopied, fileHeader.Filename, tempFile.Name())

		// Seek back to beginning for later reading by upload goroutines
		if _, err := tempFile.Seek(0, 0); err != nil {
			log.Printf("Error seeking temp file for %s: %v", fileHeader.Filename, err)
			tempFile.Close()
			os.Remove(tempFile.Name())
			continue // Skip this file
		}

		metadata.TempFile = tempFile.Name()
		metadata.File = tempFile // Store the open temp file handle
		filesToUpload = append(filesToUpload, metadata)
	}

	// Ensure temp files associated with metadata are closed and removed later
	// (runs after all upload goroutines have been collected below, because the
	// results loop blocks this function until the channel is closed).
	defer func() {
		log.Println("Running deferred cleanup for temp files...")
		for _, fm := range filesToUpload {
			if fm.File != nil {
				fm.File.Close()
				if fm.TempFile != "" {
					err := os.Remove(fm.TempFile)
					if err != nil && !os.IsNotExist(err) {
						// Don't log error if file already gone
						log.Printf("Error cleaning up temp file %s: %v", fm.TempFile, err)
					} else if err == nil {
						log.Printf("Cleaned up temp file: %s", fm.TempFile)
					}
				}
			}
		}
	}()

	if len(filesToUpload) == 0 {
		http.Error(w, "No valid documents could be processed for upload", http.StatusBadRequest)
		return
	}
	log.Printf("Total valid files to upload: %d", len(filesToUpload))

	// Concurrent upload with throttling
	// ServiceTrade API allows 30s of availability per minute (approximately 15 requests at 2s each)
	const maxConcurrent = 5                      // A conservative limit to avoid rate limiting
	const requestDelay = 300 * time.Millisecond // Delay between requests

	// Channel for collecting results
	type UploadResult struct {
		JobID    string
		DocName  string
		Success  bool
		Error    string
		Data     map[string]interface{}
		FileSize int64
	}
	totalUploads := len(jobs) * len(filesToUpload)
	resultsChan := make(chan UploadResult, totalUploads)

	// Create a wait group to track when all uploads are done
	var wg sync.WaitGroup

	// Create a semaphore channel to limit concurrent uploads
	semaphore := make(chan struct{}, maxConcurrent)

	// Start the upload workers
	log.Printf("Starting %d upload workers for %d total uploads (%d jobs x %d files)",
		maxConcurrent, totalUploads, len(jobs), len(filesToUpload))

	for _, jobID := range jobs {
		for _, metadata := range filesToUpload {
			// Create a closure capture of the metadata for the goroutine
			// This is crucial because the 'metadata' variable in the loop will change
			currentMetadata := metadata
			wg.Add(1)

			// Launch a goroutine for each job+document combination
			go func(jobID string, meta FileMetadata) {
				defer wg.Done()

				// Acquire a semaphore slot
				semaphore <- struct{}{}
				defer func() { <-semaphore }() // Release when done

				// Add a small delay to avoid overwhelming the API
				time.Sleep(requestDelay)

				// Get the file name to use (original filename)
				fileName := meta.FileName

				// Use a fresh reader for the temp file for each upload goroutine
				// Re-open the temp file for reading to avoid race conditions on the file pointer
				fileHandle, err := os.Open(meta.TempFile)
				if err != nil {
					log.Printf("Error re-opening temp file %s for job %s: %v", meta.TempFile, jobID, err)
					resultsChan <- UploadResult{
						JobID:    jobID,
						DocName:  fileName,
						Success:  false,
						Error:    fmt.Sprintf("Error preparing file: %v", err),
						FileSize: 0,
					}
					return
				}
				defer fileHandle.Close() // Close this handle when done with this upload

				// Get the expected file size for validation
				fileInfo, statErr := fileHandle.Stat() // Stat the newly opened handle
				var expectedSize int64
				if statErr == nil {
					expectedSize = fileInfo.Size()
				} else {
					log.Printf("Error getting file info for %s (job %s): %v", fileName, jobID, statErr)
					// Continue without size check if stat fails, but log it
				}

				// Add jitter delay for large batch uploads (more than 10 jobs)
				if len(jobs) > 10 {
					jitter := time.Duration(100+(time.Now().UnixNano()%400)) * time.Millisecond
					time.Sleep(jitter)
				}

				// Wrap with size tracker so we can compare bytes actually read
				// against the on-disk size after the upload completes.
				sizeTracker := &readCloserWithSize{reader: fileHandle, size: 0}

				log.Printf("Starting to stream file %s to job %s from temp file %s",
					fileName, jobID, meta.TempFile)

				// Call ServiceTrade API with the file reader
				uploadStart := time.Now()
				result, err := session.UploadAttachmentFile(jobID, fileName, meta.Type, sizeTracker)
				uploadDuration := time.Since(uploadStart)

				// Get the actual size that was uploaded
				fileSize := sizeTracker.Size()

				// Verify the upload size matches the expected file size
				sizeMatch := true
				if expectedSize > 0 && math.Abs(float64(expectedSize-fileSize)) > float64(expectedSize)*0.05 {
					// Allow 5% tolerance
					sizeMatch = false
					log.Printf("WARNING: Size mismatch for %s to job %s. Expected: %d, Uploaded: %d",
						fileName, jobID, expectedSize, fileSize)
				}

				if err != nil {
					log.Printf("Error uploading %s to job %s after %v: %v", fileName, jobID, uploadDuration, err)
					resultsChan <- UploadResult{
						JobID:    jobID,
						DocName:  fileName,
						Success:  false,
						Error:    err.Error(),
						FileSize: fileSize,
					}
				} else if !sizeMatch {
					// API returned success, but we detected size mismatch
					log.Printf("Corrupted upload of %s to job %s detected. API returned success but file sizes don't match.",
						fileName, jobID)
					resultsChan <- UploadResult{
						JobID:    jobID,
						DocName:  fileName,
						Success:  false,
						Error:    "Upload appears corrupted (file size mismatch)",
						FileSize: fileSize,
					}
				} else {
					log.Printf("Successfully uploaded %s (%.2f MB) to job %s in %v",
						fileName, float64(fileSize)/(1024*1024), jobID, uploadDuration)
					resultsChan <- UploadResult{
						JobID:    jobID,
						DocName:  fileName,
						Success:  true,
						Data:     result,
						FileSize: fileSize,
					}
				}
			}(jobID, currentMetadata) // Pass the captured metadata
		}
	}

	// NOTE: The deferred cleanup function for temp files defined earlier will run after this point.

	// Close the results channel when all uploads are done
	go func() {
		wg.Wait()
		close(resultsChan)
		log.Println("All upload goroutines finished.")
	}()

	// Collect results (this blocks until the channel is closed above)
	results := make(map[string][]UploadResult)
	resultsCount := 0
	var totalBytesUploaded int64

	for result := range resultsChan {
		resultsCount++
		log.Printf("Received result %d/%d: Job %s, File %s, Success: %v, Size: %.2f MB",
			resultsCount, totalUploads, result.JobID, result.DocName, result.Success,
			float64(result.FileSize)/(1024*1024))

		if result.Success {
			totalBytesUploaded += result.FileSize
		}

		if _, exists := results[result.JobID]; !exists {
			results[result.JobID] = []UploadResult{}
		}
		results[result.JobID] = append(results[result.JobID], result)
	}

	log.Printf("All results collected. Total: %d, Total bytes uploaded: %.2f MB",
		resultsCount, float64(totalBytesUploaded)/(1024*1024))

	// Generate HTML for results
	//
	// NOTE(review): as in ProcessCSVHandler, the HTML markup inside the string
	// literals below appears to have been stripped in this copy of the file —
	// only the text content survives. The structural comments ("End of
	// upload-stats", etc.) indicate the original div structure; restore the
	// markup from version control.
	var resultHTML bytes.Buffer

	// Count successes and failures
	var totalSuccess, totalFailure int
	for _, jobResults := range results {
		for _, result := range jobResults {
			if result.Success {
				totalSuccess++
			} else {
				totalFailure++
			}
		}
	}

	// Add summary section
	resultHTML.WriteString("")
	resultHTML.WriteString("Upload Results")
	resultHTML.WriteString("")

	// Total jobs stat
	resultHTML.WriteString("")
	resultHTML.WriteString(fmt.Sprintf("%d", len(results)))
	resultHTML.WriteString("Total Jobs")
	resultHTML.WriteString("")

	// Success stat
	resultHTML.WriteString("")
	resultHTML.WriteString(fmt.Sprintf("%d", totalSuccess))
	resultHTML.WriteString("Successful Uploads")
	resultHTML.WriteString("")

	// Failure stat
	resultHTML.WriteString("")
	resultHTML.WriteString(fmt.Sprintf("%d", totalFailure))
	resultHTML.WriteString("Failed Uploads")
	resultHTML.WriteString("")

	// File count stat
	resultHTML.WriteString("")
	// Use resultsCount which reflects total files attempted
	resultHTML.WriteString(fmt.Sprintf("%d", resultsCount))
	resultHTML.WriteString("Files Processed")
	resultHTML.WriteString("")
	resultHTML.WriteString("") // End of upload-stats

	// Add completion message
	if totalFailure == 0 && resultsCount > 0 {
		resultHTML.WriteString("All documents were successfully uploaded to ServiceTrade!")
	} else if resultsCount == 0 {
		resultHTML.WriteString("No documents were processed for upload.")
	} else {
		resultHTML.WriteString("Some documents failed to upload. See details below.")
	}
	resultHTML.WriteString("") // End of upload-summary

	// Add detailed job results
	resultHTML.WriteString("")

	// Sort job IDs for consistent display
	sortedJobs := make([]string, 0, len(results))
	for jobID := range results {
		sortedJobs = append(sortedJobs, jobID)
	}
	sort.Strings(sortedJobs)

	for _, jobID := range sortedJobs {
		jobResults := results[jobID]

		// Determine job success status based on results for *this job*
		jobHasSuccess := false
		jobHasFailure := false
		for _, result := range jobResults {
			if result.Success {
				jobHasSuccess = true
			} else {
				jobHasFailure = true
			}
		}

		// Job result row styling
		jobClass := "neutral" // Default if somehow no results for a job ID
		if jobHasSuccess && !jobHasFailure {
			jobClass = "success"
		} else if jobHasFailure {
			jobClass = "error" // Prioritize showing error if any file failed for this job
		}

		resultHTML.WriteString(fmt.Sprintf("", jobClass))
		resultHTML.WriteString(fmt.Sprintf("Job ID: %s", jobID)) // Wrap ID for better styling

		// File results
		if len(jobResults) > 0 {
			resultHTML.WriteString("")

			// Sort file results by name for consistency
			sort.Slice(jobResults, func(i, j int) bool {
				return jobResults[i].DocName < jobResults[j].DocName
			})

			for _, result := range jobResults {
				fileClass := "success"
				icon := "✓"
				message := "Successfully uploaded"
				if !result.Success {
					fileClass = "error"
					icon = "✗"
					// Sanitize error message slightly for HTML display if needed
					//
					// NOTE(review): as written these two ReplaceAll calls are
					// no-ops (they replace "<" with "<" and ">" with ">").
					// The replacement strings were almost certainly the HTML
					// entities &lt; and &gt;, themselves garbled in this copy;
					// consider html.EscapeString for the whole message (and
					// DocName) when restoring.
					message = strings.ReplaceAll(result.Error, "<", "<")
					message = strings.ReplaceAll(message, ">", ">")
				}
				resultHTML.WriteString(fmt.Sprintf("", fileClass))
				resultHTML.WriteString(fmt.Sprintf("%s", icon))
				resultHTML.WriteString(fmt.Sprintf("%s:", result.DocName))
				resultHTML.WriteString(fmt.Sprintf("%s", message))
				resultHTML.WriteString("")
			}
			resultHTML.WriteString("") // End of file-results
		} else {
			resultHTML.WriteString("No file upload results for this job.") // More specific message
		}
		resultHTML.WriteString("") // End of job-result
	}
	resultHTML.WriteString("") // End of job-results

	w.Header().Set("Content-Type", "text/html")
	w.Write(resultHTML.Bytes())
}

// readCloserWithSize is a custom io.Reader that counts the bytes read.
// It wraps an io.ReadCloser so the caller can verify, after an upload,
// how many bytes were actually consumed from the underlying reader.
type readCloserWithSize struct {
	reader io.ReadCloser
	size   int64
}

// Read delegates to the wrapped reader and accumulates the byte count.
func (r *readCloserWithSize) Read(p []byte) (n int, err error) {
	n, err = r.reader.Read(p)
	r.size += int64(n)
	return n, err
}

// Close closes the wrapped reader, tolerating a nil reader.
func (r *readCloserWithSize) Close() error {
	if r.reader != nil {
		return r.reader.Close()
	}
	return nil // Allow closing nil reader safely
}

// Size returns the current size of data read
func (r *readCloserWithSize) Size() int64 {
	return r.size
}

// DocumentFieldAddHandler generates a new document field for the form
// REMOVED as it's no longer needed with the multi-file input

// DocumentFieldRemoveHandler handles the removal of a document field
// REMOVED as it's no longer needed with the multi-file input