package web
import (
"bytes"
"encoding/csv"
"fmt"
"io"
"log"
"math"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
root "marmic/servicetrade-toolbox"
"marmic/servicetrade-toolbox/internal/api"
"marmic/servicetrade-toolbox/internal/middleware"
)
// DocumentsHandler renders the document upload page. HTMX requests
// receive only the "document_upload" partial; full page loads get the
// partial embedded in the site layout via the BodyContent key.
func DocumentsHandler(w http.ResponseWriter, r *http.Request) {
	session, ok := r.Context().Value(middleware.SessionKey).(*api.Session)
	if !ok {
		http.Error(w, "Unauthorized", http.StatusUnauthorized)
		return
	}

	tmpl := root.WebTemplates
	data := map[string]interface{}{
		"Title":   "Document Uploads",
		"Session": session,
	}

	// HTMX requests only need the fragment, not the full layout.
	if r.Header.Get("HX-Request") == "true" {
		if err := tmpl.ExecuteTemplate(w, "document_upload", data); err != nil {
			log.Printf("Template execution error: %v", err)
			http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		}
		return
	}

	// Full page: render the partial into a buffer first so the layout
	// can embed it as pre-rendered body content.
	var body bytes.Buffer
	if err := tmpl.ExecuteTemplate(&body, "document_upload", data); err != nil {
		log.Printf("Template execution error: %v", err)
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}
	data["BodyContent"] = body.String()

	if err := tmpl.ExecuteTemplate(w, "layout.html", data); err != nil {
		log.Printf("Template execution error: %v", err)
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
	}
}
// ProcessCSVHandler processes a CSV file with job numbers.
//
// It expects a multipart POST with a "csvFile" field, extracts job IDs
// from the column whose header is "id" (case-insensitive; falls back to
// the first column), and responds with an HTML fragment embedding the
// comma-separated IDs for the follow-up upload request.
func ProcessCSVHandler(w http.ResponseWriter, r *http.Request) {
// Require an authenticated session; the session value itself is unused here.
_, ok := r.Context().Value(middleware.SessionKey).(*api.Session)
if !ok {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
// Check if the request method is POST
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Parse the multipart form data with a 10MB limit
if err := r.ParseMultipartForm(10 << 20); err != nil {
http.Error(w, "Unable to parse form: "+err.Error(), http.StatusBadRequest)
return
}
// Get the file from the form
file, _, err := r.FormFile("csvFile")
if err != nil {
http.Error(w, "Error retrieving file: "+err.Error(), http.StatusBadRequest)
return
}
defer file.Close()
// Read the CSV data (entire file in memory; bounded by the form-size limit)
csvData, err := csv.NewReader(file).ReadAll()
if err != nil {
http.Error(w, "Error reading CSV file: "+err.Error(), http.StatusBadRequest)
return
}
// Need a header row plus at least one data row.
if len(csvData) < 2 {
http.Error(w, "CSV file must contain at least a header row and one data row", http.StatusBadRequest)
return
}
// Find the index of the 'id' column (case-insensitive, whitespace-trimmed)
headerRow := csvData[0]
idColumnIndex := -1
for i, header := range headerRow {
if strings.ToLower(strings.TrimSpace(header)) == "id" {
idColumnIndex = i
break
}
}
// If 'id' column not found, try the first column
if idColumnIndex == -1 {
idColumnIndex = 0
log.Printf("No 'id' column found in CSV, using first column (header: %s)", headerRow[0])
} else {
log.Printf("Found 'id' column at index %d", idColumnIndex)
}
// Extract job numbers from the CSV, skipping blanks and rows too short
// to contain the id column.
var jobNumbers []string
for rowIndex, row := range csvData {
// Skip header row
if rowIndex == 0 {
continue
}
if len(row) > idColumnIndex {
// Extract and clean up the job ID
jobID := strings.TrimSpace(row[idColumnIndex])
if jobID != "" {
jobNumbers = append(jobNumbers, jobID)
}
}
}
totalJobs := len(jobNumbers)
log.Printf("Extracted %d job IDs from CSV", totalJobs)
if totalJobs == 0 {
http.Error(w, "No valid job IDs found in the CSV file", http.StatusBadRequest)
return
}
// Create a hidden input with the job IDs
jobsValue := strings.Join(jobNumbers, ",")
// Generate HTML for the main response (hidden input for job-ids-container)
var responseHTML bytes.Buffer
// NOTE(review): the format string below is empty yet receives jobsValue —
// the hidden-input markup appears to have been stripped from this source;
// restore the original template before shipping.
responseHTML.WriteString(fmt.Sprintf(``, jobsValue))
responseHTML.WriteString(fmt.Sprintf(`
Found %d job(s) in the CSV file
`, totalJobs))
// Generate out-of-band swap for the preview section - simplified version
// NOTE(review): surrounding markup in these raw strings also looks
// stripped; verify against the original template.
responseHTML.WriteString(fmt.Sprintf(`
✓ Jobs Detected
Upload to %d job(s)
`, totalJobs))
w.Header().Set("Content-Type", "text/html")
w.Write(responseHTML.Bytes())
}
// UploadDocumentsHandler handles document uploads to jobs.
//
// It expects a multipart POST carrying a comma-separated job list in
// "jobNumbers" (or "job-ids") plus one or more "document-file-<suffix>"
// file fields with matching "document-type-<suffix>" and optional
// "document-name-<suffix>" values. Each file is spooled to a temp file,
// then uploaded to every listed job via the ServiceTrade API using a
// bounded pool of goroutines, and an HTML results fragment is returned.
func UploadDocumentsHandler(w http.ResponseWriter, r *http.Request) {
session, ok := r.Context().Value(middleware.SessionKey).(*api.Session)
if !ok {
http.Error(w, "Unauthorized", http.StatusUnauthorized)
return
}
// Check if the request method is POST
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
log.Printf("Starting document upload handler with Content-Length: %.2f MB",
float64(r.ContentLength)/(1024*1024))
// Parse the multipart form with a reasonable buffer size
// Files larger than this will be saved to temporary files automatically
maxMemory := int64(32 << 20) // 32MB in memory, rest to disk
if err := r.ParseMultipartForm(maxMemory); err != nil {
log.Printf("Error parsing multipart form: %v", err)
http.Error(w, "Unable to parse form: "+err.Error(), http.StatusBadRequest)
return
}
// Get job numbers from form values; "jobNumbers" takes precedence,
// with "job-ids" accepted as a fallback field name.
jobNumbers := r.FormValue("jobNumbers")
if jobNumbers == "" {
jobNumbers = r.FormValue("job-ids")
if jobNumbers == "" {
log.Printf("No job numbers found in form values")
http.Error(w, "No job numbers provided", http.StatusBadRequest)
return
}
}
log.Printf("Job numbers: %s", jobNumbers)
jobs := strings.Split(jobNumbers, ",")
// NOTE(review): strings.Split on a non-empty string always yields at
// least one element, so this guard is unreachable; blank entries from
// stray commas are not filtered out here either.
if len(jobs) == 0 {
http.Error(w, "No valid job numbers provided", http.StatusBadRequest)
return
}
// Store file metadata
// FileMetadata describes one uploaded file staged for delivery.
type FileMetadata struct {
FormField string
FileName string
Type string
CustomName string
TempFile string // Path to temp file if we create one
FileData []byte // File data if small enough to keep in memory (never populated in this handler)
File *os.File // Open file handle if we're using a temp file (used only for cleanup below)
}
var filesToUpload []FileMetadata
// Process each file upload in the form
for formField, fileHeaders := range r.MultipartForm.File {
if !strings.HasPrefix(formField, "document-file-") {
continue
}
if len(fileHeaders) == 0 {
continue
}
// Only the first file per field is used.
fileHeader := fileHeaders[0]
if fileHeader.Filename == "" {
continue
}
// Get suffix for related form fields
suffix := strings.TrimPrefix(formField, "document-file-")
typeField := "document-type-" + suffix
nameField := "document-name-" + suffix
// Get document type and custom name
docType := r.FormValue(typeField)
if docType == "" {
log.Printf("No document type for file %s, skipping", fileHeader.Filename)
continue
}
// If a custom name was given without an extension, carry over the
// original file's extension so the upload keeps a usable suffix.
customName := r.FormValue(nameField)
if customName != "" && !strings.Contains(customName, ".") {
ext := filepath.Ext(fileHeader.Filename)
if ext != "" {
customName = customName + ext
}
}
// Open the uploaded file
uploadedFile, err := fileHeader.Open()
if err != nil {
log.Printf("Error opening uploaded file %s: %v", fileHeader.Filename, err)
continue
}
// Prepare metadata
metadata := FileMetadata{
FormField: formField,
FileName: fileHeader.Filename,
Type: docType,
CustomName: customName,
}
// Create a temp file for the upload (regardless of size to ensure streaming)
tempFile, err := os.CreateTemp("", "upload-*"+filepath.Ext(fileHeader.Filename))
if err != nil {
log.Printf("Error creating temp file for %s: %v", fileHeader.Filename, err)
uploadedFile.Close()
continue
}
// Copy the file content to the temp file
bytesCopied, err := io.Copy(tempFile, uploadedFile)
uploadedFile.Close()
if err != nil {
log.Printf("Error copying to temp file for %s: %v", fileHeader.Filename, err)
tempFile.Close()
os.Remove(tempFile.Name())
continue
}
log.Printf("Copied %d bytes of %s to temporary file: %s",
bytesCopied, fileHeader.Filename, tempFile.Name())
// Seek back to beginning for later reading
// NOTE(review): the Seek error is ignored, and each upload goroutine
// re-opens the temp file by path anyway, so this rewind appears
// redundant — confirm nothing reads through this handle before removing.
tempFile.Seek(0, 0)
metadata.TempFile = tempFile.Name()
metadata.File = tempFile // Store the open file handle
filesToUpload = append(filesToUpload, metadata)
}
if len(filesToUpload) == 0 {
http.Error(w, "No valid documents selected for upload", http.StatusBadRequest)
return
}
log.Printf("Total valid files to upload: %d", len(filesToUpload))
// Concurrent upload with throttling
// ServiceTrade API allows 30s of availability per minute (approximately 15 requests at 2s each)
const maxConcurrent = 5 // A conservative limit to avoid rate limiting
const requestDelay = 300 * time.Millisecond // Delay between requests
// Channel for collecting results
// UploadResult records the outcome of one file upload to one job.
type UploadResult struct {
JobID string
DocName string
Success bool
Error string
Data map[string]interface{}
FileSize int64
}
totalUploads := len(jobs) * len(filesToUpload)
// Buffered to the exact result count so workers never block on send.
resultsChan := make(chan UploadResult, totalUploads)
// Create a wait group to track when all uploads are done
var wg sync.WaitGroup
// Create a semaphore channel to limit concurrent uploads
semaphore := make(chan struct{}, maxConcurrent)
// Start the upload workers
log.Printf("Starting %d upload workers for %d total uploads",
maxConcurrent, totalUploads)
for _, jobID := range jobs {
for _, metadata := range filesToUpload {
wg.Add(1)
// Launch a goroutine for each job+document combination
// (loop values are passed as arguments, avoiding the classic
// loop-variable capture bug in pre-1.22 Go).
go func(jobID string, metadata FileMetadata) {
defer wg.Done()
// Acquire a semaphore slot
semaphore <- struct{}{}
defer func() { <-semaphore }() // Release when done
// Add a small delay to avoid overwhelming the API
time.Sleep(requestDelay)
// Get the file name to use (custom name or original)
fileName := metadata.FileName
if metadata.CustomName != "" {
fileName = metadata.CustomName
}
// Create a fresh file reader for each upload to avoid sharing file handles
fileHandle, err := os.Open(metadata.TempFile)
if err != nil {
log.Printf("Error opening temp file for %s: %v", fileName, err)
resultsChan <- UploadResult{
JobID: jobID,
DocName: fileName,
Success: false,
Error: fmt.Sprintf("Error preparing file: %v", err),
FileSize: 0,
}
return
}
defer fileHandle.Close() // Close this handle when done with this upload
// Get the expected file size for validation
fileInfo, statErr := os.Stat(metadata.TempFile)
var expectedSize int64
if statErr == nil {
expectedSize = fileInfo.Size()
}
// Add jitter delay for large batch uploads (more than 10 jobs)
// to spread requests and soften rate-limit pressure.
if len(jobs) > 10 {
jitter := time.Duration(100+(time.Now().UnixNano()%400)) * time.Millisecond
time.Sleep(jitter)
}
// Wrap with size tracker so we can compare bytes streamed against
// the on-disk size after the API call.
sizeTracker := &readCloserWithSize{reader: fileHandle, size: 0}
fileReader := sizeTracker
// Log streaming progress
log.Printf("Starting to stream file %s to job %s from fresh file handle", fileName, jobID)
// Call ServiceTrade API with the file reader
uploadStart := time.Now()
result, err := session.UploadAttachmentFile(jobID, fileName, metadata.Type, fileReader)
uploadDuration := time.Since(uploadStart)
// Get the actual size that was uploaded
fileSize := sizeTracker.Size()
// Verify the upload size matches the expected file size
// (tolerates up to 5% deviation before flagging corruption).
sizeMatch := true
if expectedSize > 0 && math.Abs(float64(expectedSize-fileSize)) > float64(expectedSize)*0.05 {
sizeMatch = false
log.Printf("WARNING: Size mismatch for %s to job %s. Expected: %d, Uploaded: %d",
fileName, jobID, expectedSize, fileSize)
}
if err != nil {
log.Printf("Error uploading %s to job %s after %v: %v",
fileName, jobID, uploadDuration, err)
resultsChan <- UploadResult{
JobID: jobID,
DocName: fileName,
Success: false,
Error: err.Error(),
FileSize: fileSize,
}
} else if !sizeMatch {
// API returned success, but we detected size mismatch
log.Printf("Corrupted upload of %s to job %s detected. API returned success but file sizes don't match.",
fileName, jobID)
resultsChan <- UploadResult{
JobID: jobID,
DocName: fileName,
Success: false,
Error: "Upload appears corrupted (file size mismatch)",
FileSize: fileSize,
}
} else {
log.Printf("Successfully uploaded %s (%.2f MB) to job %s in %v",
fileName, float64(fileSize)/(1024*1024), jobID, uploadDuration)
resultsChan <- UploadResult{
JobID: jobID,
DocName: fileName,
Success: true,
Data: result,
FileSize: fileSize,
}
}
}(jobID, metadata)
}
}
// Clean up temp files when all uploads are done
// (this defer runs after the results loop below, which only ends once
// every worker has finished and the channel has been closed).
defer func() {
for _, metadata := range filesToUpload {
if metadata.File != nil {
metadata.File.Close()
if metadata.TempFile != "" {
os.Remove(metadata.TempFile)
log.Printf("Cleaned up temp file: %s", metadata.TempFile)
}
}
}
}()
// Close the results channel when all uploads are done
go func() {
wg.Wait()
close(resultsChan)
}()
// Collect results, grouped by job ID.
results := make(map[string][]UploadResult)
resultsCount := 0
var totalBytesUploaded int64
for result := range resultsChan {
resultsCount++
log.Printf("Received result %d/%d: Job %s, File %s, Success: %v, Size: %.2f MB",
resultsCount, totalUploads, result.JobID, result.DocName, result.Success,
float64(result.FileSize)/(1024*1024))
if result.Success {
totalBytesUploaded += result.FileSize
}
if _, exists := results[result.JobID]; !exists {
results[result.JobID] = []UploadResult{}
}
results[result.JobID] = append(results[result.JobID], result)
}
log.Printf("All results collected. Total: %d, Total bytes uploaded: %.2f MB",
resultsCount, float64(totalBytesUploaded)/(1024*1024))
// Generate HTML for results
var resultHTML bytes.Buffer
// Count successes and failures
var totalSuccess, totalFailure int
for _, jobResults := range results {
for _, result := range jobResults {
if result.Success {
totalSuccess++
} else {
totalFailure++
}
}
}
// Add summary section
// NOTE(review): the section below appears truncated — the HTML template
// strings and the per-job/per-file loops that emitted them (presumably
// consuming totalSuccess/totalFailure and results) seem to have been
// stripped from this source, leaving unbalanced string literals and an
// orphaned else branch. This will not compile as-is; restore the
// original markup-generation code.
resultHTML.WriteString("
") // End of file-results
} else {
resultHTML.WriteString("
No files processed for this job.
")
}
resultHTML.WriteString("
") // End of job-result
}
resultHTML.WriteString("
") // End of job-results
w.Header().Set("Content-Type", "text/html")
w.Write(resultHTML.Bytes())
}
// readCloserWithSize is a custom io.Reader that counts the bytes read
type readCloserWithSize struct {
reader io.ReadCloser
size int64
}
func (r *readCloserWithSize) Read(p []byte) (n int, err error) {
n, err = r.reader.Read(p)
r.size += int64(n)
return n, err
}
func (r *readCloserWithSize) Close() error {
return r.reader.Close()
}
// Size returns the current size of data read
func (r *readCloserWithSize) Size() int64 {
return r.size
}
// DocumentFieldAddHandler generates a new document field for the form.
// It responds with an HTML fragment for one additional upload row,
// keyed by a timestamp-derived id so field names do not collide.
func DocumentFieldAddHandler(w http.ResponseWriter, r *http.Request) {
// Generate a random ID for the new field
// (nanosecond timestamp — unique enough for per-page form fields).
newId := fmt.Sprintf("%d", time.Now().UnixNano())
// Create HTML for a new document row
// NOTE(review): the format string below is empty yet receives nine
// newId arguments — the row markup appears to have been stripped from
// this source. Restore the original template before shipping.
html := fmt.Sprintf(`
`, newId, newId, newId, newId, newId, newId, newId, newId, newId)
w.Header().Set("Content-Type", "text/html")
w.Write([]byte(html))
}
// DocumentFieldRemoveHandler handles the removal of a document field
func DocumentFieldRemoveHandler(w http.ResponseWriter, r *http.Request) {
// We read the ID but don't need to use it for simple removal
_ = r.URL.Query().Get("id")
// Count how many document rows exist
// For simplicity, we'll just return an empty response to remove the field
// In a complete implementation, we'd check if this is the last field and handle that case
w.Header().Set("Content-Type", "text/html")
w.Write([]byte(""))
}