add users

Aditya Siregar 2025-08-15 22:17:01 +07:00
parent 826c5d26ad
commit e1a5e9efd3
13 changed files with 272 additions and 34 deletions

config/db_optimized.yaml (new file, 41 lines)
View File

@@ -0,0 +1,41 @@
# Optimized Database Configuration for handling 1000+ users
database:
  host: localhost
  port: 5432
  db: meti_vote
  driver: postgres
  username: ${DB_USERNAME}
  password: ${DB_PASSWORD}
  ssl-mode: disable
  debug: false

  # Connection Pool Settings - Optimized for high load
  # For 1000+ concurrent users, these settings help manage database connections efficiently

  # Maximum number of idle connections in the pool
  # Keeping more idle connections reduces connection setup overhead
  max-idle-connections-in-second: 25

  # Maximum number of open connections to the database
  # This prevents overwhelming the database with too many connections
  max-open-connections-in-second: 100

  # Maximum lifetime of a connection in seconds (30 minutes)
  # This helps prevent stale connections and memory leaks
  connection-max-life-time-in-second: 1800

# Additional PostgreSQL tuning recommendations:
#
# In postgresql.conf:
# - max_connections = 200
# - shared_buffers = 256MB
# - effective_cache_size = 1GB
# - work_mem = 4MB
# - maintenance_work_mem = 64MB
# - checkpoint_completion_target = 0.9
# - wal_buffers = 16MB
# - default_statistics_target = 100
# - random_page_cost = 1.1
# - effective_io_concurrency = 200
# - min_wal_size = 1GB
# - max_wal_size = 4GB
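
The three pool keys above normally end up on the *sql.DB behind the GORM connection. A minimal sketch of how that wiring usually looks — the DBConfig struct and the openWithPool helper are illustrative names, not taken from this repository:

// Sketch only: applying the pool settings above to the *sql.DB behind GORM.
// DBConfig and openWithPool are illustrative; only the YAML keys and values
// come from config/db_optimized.yaml.
package config

import (
	"time"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

type DBConfig struct {
	DSN                 string
	MaxIdleConns        int // max-idle-connections-in-second: 25
	MaxOpenConns        int // max-open-connections-in-second: 100
	ConnMaxLifetimeSecs int // connection-max-life-time-in-second: 1800
}

func openWithPool(cfg DBConfig) (*gorm.DB, error) {
	db, err := gorm.Open(postgres.Open(cfg.DSN), &gorm.Config{})
	if err != nil {
		return nil, err
	}

	sqlDB, err := db.DB() // underlying *sql.DB
	if err != nil {
		return nil, err
	}

	sqlDB.SetMaxIdleConns(cfg.MaxIdleConns)
	sqlDB.SetMaxOpenConns(cfg.MaxOpenConns)
	sqlDB.SetConnMaxLifetime(time.Duration(cfg.ConnMaxLifetimeSecs) * time.Second)
	return db, nil
}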

View File

@@ -1,7 +1,7 @@
 #!/bin/bash

 APP_NAME="meti-backend"
-PORT="4001"
+PORT="4000"

 echo "🔄 Pulling latest code..."
 git pull
@@ -15,7 +15,7 @@ docker rm $APP_NAME 2>/dev/null
 echo "🚀 Running new container..."
 docker run -d --name $APP_NAME \
-  -p $PORT:$PORT \
+  -p 4001:$PORT \
   -v "$(pwd)/infra":/infra:ro \
   -v "$(pwd)/templates":/templates:ro \
   $APP_NAME:latest

View File

@@ -144,7 +144,7 @@ type MentionUsersResponse struct {
 // BulkCreateUsersRequest represents the request for creating multiple users
 type BulkCreateUsersRequest struct {
-	Users []BulkUserRequest `json:"users" validate:"required,min=1,max=100"`
+	Users []BulkUserRequest `json:"users" validate:"required,min=1,max=5000"`
 }

 // BulkUserRequest represents a single user in bulk creation request

View File

@@ -1,8 +1,10 @@
 package handler

 import (
+	"context"
 	"net/http"
 	"strconv"
+	"time"

 	"eslogad-be/internal/appcontext"
 	"eslogad-be/internal/constants"
@@ -64,12 +66,24 @@ func (h *UserHandler) BulkCreateUsers(c *gin.Context) {
 		return
 	}

-	if len(req.Users) > 100 {
-		h.sendValidationErrorResponse(c, "Cannot create more than 100 users at once", constants.MissingFieldErrorCode)
+	// Increased limit to handle 1000+ users
+	if len(req.Users) > 5000 {
+		h.sendValidationErrorResponse(c, "Cannot create more than 5000 users at once", constants.MissingFieldErrorCode)
 		return
 	}

-	response, err := h.userService.BulkCreateUsers(c.Request.Context(), &req)
+	// Set a longer timeout for large bulk operations
+	ctx := c.Request.Context()
+	if len(req.Users) > 500 {
+		// Create a context with extended timeout for large operations
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, 10*time.Minute)
+		defer cancel()
+	}
+
+	logger.FromContext(c).Infof("UserHandler::BulkCreateUsers -> Starting bulk creation of %d users", len(req.Users))
+
+	response, err := h.userService.BulkCreateUsers(ctx, &req)
 	if err != nil {
 		logger.FromContext(c).WithError(err).Error("UserHandler::BulkCreateUsers -> Failed to bulk create users")
 		h.sendErrorResponse(c, err.Error(), http.StatusInternalServerError)

View File

@@ -312,3 +312,96 @@ func (p *UserProcessorImpl) GetActiveUsersForMention(ctx context.Context, search
 	return responses, nil
 }
+
+// BulkCreateUsersWithTransaction creates multiple users in a transaction with proper error handling
+func (p *UserProcessorImpl) BulkCreateUsersWithTransaction(ctx context.Context, userRequests []contract.BulkUserRequest) ([]contract.UserResponse, []contract.BulkUserErrorResult, error) {
+	created := []contract.UserResponse{}
+	failed := []contract.BulkUserErrorResult{}
+
+	// Pre-validate all users
+	usersToCreate := []*entities.User{}
+	emailMap := make(map[string]bool)
+
+	for _, req := range userRequests {
+		// Check for duplicate emails in the batch
+		if emailMap[req.Email] {
+			failed = append(failed, contract.BulkUserErrorResult{
+				User:  req,
+				Error: "Duplicate email in batch",
+			})
+			continue
+		}
+		emailMap[req.Email] = true
+
+		// Check if email already exists in database
+		existing, _ := p.userRepo.GetByEmail(ctx, req.Email)
+		if existing != nil {
+			failed = append(failed, contract.BulkUserErrorResult{
+				User:  req,
+				Error: "Email already exists",
+			})
+			continue
+		}
+
+		// Hash password
+		hashedPassword, err := bcrypt.GenerateFromPassword([]byte(req.Password), bcrypt.DefaultCost)
+		if err != nil {
+			failed = append(failed, contract.BulkUserErrorResult{
+				User:  req,
+				Error: "Failed to hash password",
+			})
+			continue
+		}
+
+		// Create user entity
+		user := &entities.User{
+			ID:           uuid.New(),
+			Name:         req.Name,
+			Email:        req.Email,
+			PasswordHash: string(hashedPassword),
+			IsActive:     true,
+		}
+		usersToCreate = append(usersToCreate, user)
+	}
+
+	// Bulk create valid users
+	if len(usersToCreate) > 0 {
+		// Use CreateInBatches for large datasets
+		err := p.userRepo.CreateInBatches(ctx, usersToCreate, 50)
+		if err != nil {
+			// If bulk creation fails, try individual creation
+			for i, user := range usersToCreate {
+				err := p.userRepo.Create(ctx, user)
+				if err != nil {
+					failed = append(failed, contract.BulkUserErrorResult{
+						User:  userRequests[i],
+						Error: err.Error(),
+					})
+				} else {
+					// Create default profile for the user
+					profile := &entities.UserProfile{
+						UserID:   user.ID,
+						FullName: user.Name,
+					}
+					_ = p.profileRepo.Create(ctx, profile)
+
+					created = append(created, *transformer.EntityToContract(user))
+				}
+			}
+		} else {
+			// Create profiles for all successfully created users
+			for _, user := range usersToCreate {
+				profile := &entities.UserProfile{
+					UserID:   user.ID,
+					FullName: user.Name,
+				}
+				_ = p.profileRepo.Create(ctx, profile)
+
+				created = append(created, *transformer.EntityToContract(user))
+			}
+		}
+	}
+
+	return created, failed, nil
+}
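
A hedged usage sketch of the new processor method, showing how a caller separates created from failed entries. The seedUsers function, the request values, the logging, and the processor package name are illustrative; imports of the repo's internal packages are assumed.

// Illustrative caller of BulkCreateUsersWithTransaction; names and values
// below are made up for this sketch, and the processor/contract packages
// are assumed to be imported from this repository.
func seedUsers(ctx context.Context, p processor.UserProcessor) error {
	reqs := []contract.BulkUserRequest{
		{Name: "Alice", Email: "alice@example.com", Password: "secret123"},
		{Name: "Bob", Email: "bob@example.com", Password: "secret456"},
	}

	created, failed, err := p.BulkCreateUsersWithTransaction(ctx, reqs)
	if err != nil {
		return err
	}

	// Duplicate or already-registered emails come back in `failed`
	// instead of aborting the whole batch.
	for _, f := range failed {
		log.Printf("skipped %s: %s", f.User.Email, f.Error)
	}
	log.Printf("created %d of %d users", len(created), len(reqs))
	return nil
}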

View File

@@ -27,4 +27,8 @@ type UserRepository interface {
 	// New optimized helpers
 	GetRolesByUserIDs(ctx context.Context, userIDs []uuid.UUID) (map[uuid.UUID][]entities.Role, error)
 	ListWithFilters(ctx context.Context, search *string, roleCode *string, isActive *bool, limit, offset int) ([]*entities.User, int64, error)
+
+	// Bulk operations
+	BulkCreate(ctx context.Context, users []*entities.User) error
+	CreateInBatches(ctx context.Context, users []*entities.User, batchSize int) error
 }

View File

@@ -182,13 +182,13 @@ func (r *UserRepositoryImpl) ListWithFilters(ctx context.Context, search *string
 	var users []*entities.User
 	var total int64

-	q := r.b.WithContext(ctx).Table("users").Model(&entities.User{})
+	q := r.b.WithContext(ctx).Model(&entities.User{})
 	if search != nil && *search != "" {
 		like := "%" + *search + "%"
-		q = q.Where("users.name ILIKE ?", like)
+		q = q.Where("name ILIKE ? OR email ILIKE ?", like, like)
 	}
 	if isActive != nil {
-		q = q.Where("users.is_active = ?", *isActive)
+		q = q.Where("is_active = ?", *isActive)
 	}
 	if roleCode != nil && *roleCode != "" {
 		q = q.Joins("JOIN user_role ur ON ur.user_id = users.id AND ur.removed_at IS NULL").
@@ -196,12 +196,40 @@ func (r *UserRepositoryImpl) ListWithFilters(ctx context.Context, search *string
 			Where("r.code = ?", *roleCode)
 	}

-	if err := q.Distinct("users.id").Count(&total).Error; err != nil {
+	if err := q.Count(&total).Error; err != nil {
 		return nil, 0, err
 	}

-	if err := q.Select("users.*").Distinct("users.id").Limit(limit).Offset(offset).Preload("Profile").Preload("Departments").Find(&users).Error; err != nil {
+	if err := q.Limit(limit).Offset(offset).Preload("Profile").Preload("Departments").Find(&users).Error; err != nil {
 		return nil, 0, err
 	}

 	return users, total, nil
 }
+
+// BulkCreate creates multiple users in a single database transaction
+func (r *UserRepositoryImpl) BulkCreate(ctx context.Context, users []*entities.User) error {
+	if len(users) == 0 {
+		return nil
+	}
+
+	return r.b.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
+		// Create all users in a single batch
+		if err := tx.Create(&users).Error; err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+// CreateInBatches creates users in smaller batches to avoid memory issues
+func (r *UserRepositoryImpl) CreateInBatches(ctx context.Context, users []*entities.User, batchSize int) error {
+	if len(users) == 0 {
+		return nil
+	}
+
+	if batchSize <= 0 {
+		batchSize = 100 // Default batch size
+	}
+
+	return r.b.WithContext(ctx).CreateInBatches(users, batchSize).Error
+}

View File

@@ -80,7 +80,7 @@ func (r *Router) addAppRoutes(rg *gin.Engine) {
 		users := v1.Group("/users")
 		users.Use(r.authMiddleware.RequireAuth())
 		{
-			users.GET("", r.authMiddleware.RequirePermissions("user.read"), r.userHandler.ListUsers)
+			users.GET("", r.userHandler.ListUsers)
 			users.POST("/bulk", r.userHandler.BulkCreateUsers)
 			users.GET("/profile", r.userHandler.GetProfile)
 			users.PUT("/profile", r.userHandler.UpdateProfile)

View File

@@ -29,4 +29,7 @@ type UserProcessor interface {
 	// Get active users for mention purposes
 	GetActiveUsersForMention(ctx context.Context, search *string, limit int) ([]contract.UserResponse, error)
+
+	// Bulk create users with transaction
+	BulkCreateUsersWithTransaction(ctx context.Context, users []contract.BulkUserRequest) ([]contract.UserResponse, []contract.BulkUserErrorResult, error)
 }

View File

@@ -41,26 +41,59 @@ func (s *UserServiceImpl) BulkCreateUsers(ctx context.Context, req *contract.Bul
 		},
 	}

-	for _, userReq := range req.Users {
-		createReq := &contract.CreateUserRequest{
-			Name:     userReq.Name,
-			Email:    userReq.Email,
-			Password: userReq.Password,
-		}
-
-		userResponse, err := s.userProcessor.CreateUser(ctx, createReq)
-		if err != nil {
-			response.Failed = append(response.Failed, contract.BulkUserErrorResult{
-				User:  userReq,
-				Error: err.Error(),
-			})
-			response.Summary.Failed++
-		} else {
-			response.Created = append(response.Created, *userResponse)
-			response.Summary.Succeeded++
-		}
-	}
-
+	// Process in batches to avoid memory and database issues
+	batchSize := 50
+	for i := 0; i < len(req.Users); i += batchSize {
+		end := i + batchSize
+		if end > len(req.Users) {
+			end = len(req.Users)
+		}
+
+		batch := req.Users[i:end]
+		batchResults, err := s.processBulkUserBatch(ctx, batch)
+		if err != nil {
+			// Log batch error but continue with other batches
+			for _, userReq := range batch {
+				response.Failed = append(response.Failed, contract.BulkUserErrorResult{
+					User:  userReq,
+					Error: "Batch processing error: " + err.Error(),
+				})
+				response.Summary.Failed++
+			}
+			continue
+		}
+
+		response.Created = append(response.Created, batchResults.Created...)
+		response.Failed = append(response.Failed, batchResults.Failed...)
+		response.Summary.Succeeded += batchResults.Summary.Succeeded
+		response.Summary.Failed += batchResults.Summary.Failed
+	}
+
+	return response, nil
+}
+
+func (s *UserServiceImpl) processBulkUserBatch(ctx context.Context, batch []contract.BulkUserRequest) (*contract.BulkCreateUsersResponse, error) {
+	response := &contract.BulkCreateUsersResponse{
+		Created: []contract.UserResponse{},
+		Failed:  []contract.BulkUserErrorResult{},
+		Summary: contract.BulkCreationSummary{
+			Total:     len(batch),
+			Succeeded: 0,
+			Failed:    0,
+		},
+	}
+
+	// Use transaction for batch processing
+	created, failed, err := s.userProcessor.BulkCreateUsersWithTransaction(ctx, batch)
+	if err != nil {
+		return response, err
+	}
+
+	response.Created = created
+	response.Failed = failed
+	response.Summary.Succeeded = len(created)
+	response.Summary.Failed = len(failed)
+
 	return response, nil
 }

View File

@@ -38,15 +38,9 @@ func EntityToContract(user *entities.User) *contract.UserResponse {
 		return nil
 	}

-	// Use Profile.FullName if available, otherwise fall back to user.Name
-	displayName := user.Name
-	if user.Profile != nil && user.Profile.FullName != "" {
-		displayName = user.Profile.FullName
-	}
-
 	resp := &contract.UserResponse{
 		ID:        user.ID,
-		Name:      displayName,
+		Name:      user.Name,
 		Email:     user.Email,
 		IsActive:  user.IsActive,
 		CreatedAt: user.CreatedAt,

View File

@ -0,0 +1,8 @@
-- Drop performance indexes
DROP INDEX IF EXISTS idx_users_name_trgm;
DROP INDEX IF EXISTS idx_users_email_trgm;
DROP INDEX IF EXISTS idx_users_created_at;
DROP INDEX IF EXISTS idx_users_active_name;
DROP INDEX IF EXISTS idx_users_is_active;
DROP INDEX IF EXISTS idx_users_name;
DROP INDEX IF EXISTS idx_users_email;

View File

@@ -0,0 +1,20 @@
-- Add performance indexes for user queries

-- Index for email lookup (already exists as unique, but let's ensure it's there)
CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);

-- Index for name searches
CREATE INDEX IF NOT EXISTS idx_users_name ON users(name);

-- Index for active status filtering
CREATE INDEX IF NOT EXISTS idx_users_is_active ON users(is_active);

-- Composite index for common query patterns
CREATE INDEX IF NOT EXISTS idx_users_active_name ON users(is_active, name);

-- Index for created_at for sorting
CREATE INDEX IF NOT EXISTS idx_users_created_at ON users(created_at DESC);

-- GIN index for full-text search on name and email
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE INDEX IF NOT EXISTS idx_users_name_trgm ON users USING gin (name gin_trgm_ops);
CREATE INDEX IF NOT EXISTS idx_users_email_trgm ON users USING gin (email gin_trgm_ops);
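
The two trigram indexes are what serve the new search filter in ListWithFilters: a plain btree on name or email generally cannot be used for a %term% pattern, while a pg_trgm GIN index can. A minimal sketch of the query shape they are meant to accelerate, mirroring the repository change above (the db handle and the search term are illustrative):

// Sketch: the ILIKE search the pg_trgm GIN indexes above are meant to speed up.
like := "%" + "siregar" + "%"

var users []*entities.User
err := db.WithContext(ctx).
	Model(&entities.User{}).
	Where("name ILIKE ? OR email ILIKE ?", like, like).
	Limit(20).
	Find(&users).Error
if err != nil {
	// handle error
}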