Compare commits


2 commits

Author   SHA1        Message         Date
nquidox  13ebb27335  small refactor  2026-02-28 10:53:33 +03:00  (checks: Make image (push) successful in 53s)
nquidox  c955615fb1  context fix     2026-02-28 10:53:02 +03:00
10 changed files with 87 additions and 93 deletions

View file

@@ -1,17 +1,24 @@
 package main
 
 import (
+    "context"
+    "os"
+    "os/signal"
+    "syscall"
     "task-processor/config"
     "task-processor/internal/app"
     "task-processor/internal/logging"
 )
 
 func main() {
+    ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
+    defer cancel()
     c := config.NewConfig()
     logging.LogSetup(c.LogLevel)
     appl := app.New(c)
-    appl.Run()
+    appl.Run(ctx)
 }
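Aside: signal.NotifyContext (Go 1.16+) returns a context that is cancelled on the first listed signal, which is what lets Run(ctx) observe Ctrl-C/SIGTERM. A minimal self-contained sketch of the pattern; the run function here is illustrative, not repository code:

package main

import (
    "context"
    "fmt"
    "os"
    "os/signal"
    "syscall"
)

// run blocks until the signal-bound context is cancelled.
func run(ctx context.Context) {
    <-ctx.Done()
    fmt.Println("stopping:", ctx.Err()) // "context canceled" after SIGINT/SIGTERM
}

func main() {
    // ctx is cancelled on the first SIGINT or SIGTERM; cancel restores
    // default signal behaviour and releases resources.
    ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
    defer cancel()
    run(ctx)
}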

View file

@@ -3,11 +3,9 @@ package app
 
 import (
     "context"
     log "github.com/sirupsen/logrus"
+    "google.golang.org/grpc"
     "net"
-    "os"
-    "os/signal"
     "runtime"
-    "syscall"
     "task-processor/config"
     "task-processor/internal/appState"
     "task-processor/internal/parsers"
@@ -28,6 +26,8 @@ type App struct {
     state       *appState.State
     network     *remote.Network
     numCPUs     int
+    metricsSrv  *router.Handler
+    taskApiSrv  *grpc.Server
 }
 
 func New(c *config.Config) *App {
@@ -38,6 +38,14 @@ func New(c *config.Config) *App {
     st := appState.NewState(numCPUs, c.CheckPeriod, c.TasksCfg.RetryCount, c.TasksCfg.RetryMinutes)
+    server := newServer(st)
+
+    //metrics
+    mSrv := router.NewHandler(router.Deps{
+        Addr:    net.JoinHostPort(c.Metrics.Host, c.Metrics.Port),
+        GinMode: c.Metrics.GinMode,
+    })
+
     return &App{
         config:      c,
         checkPeriod: time.Duration(c.CheckPeriod),
@@ -47,33 +55,31 @@ func New(c *config.Config) *App {
         state:       st,
         network:     remote.NewHandler(),
         numCPUs:     numCPUs,
+        metricsSrv:  mSrv,
+        taskApiSrv:  server,
     }
 }
 
-func (app *App) Run() {
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
+func (app *App) Run(ctx context.Context) {
     log.Info("Application start")
+    addr := net.JoinHostPort(app.config.GrpcCfg.ServerHost, app.config.GrpcCfg.ServerPort)
     log.WithFields(log.Fields{
-        "Service address": app.config.GrpcCfg.ServerHost + ":" + app.config.GrpcCfg.ServerPort,
+        "Service address": addr,
         "Number of CPUs":  app.numCPUs,
     }).Debug("App settings")
 
-    //metrics
-    mSrv := router.NewHandler(router.Deps{
-        Addr:    net.JoinHostPort(app.config.Metrics.Host, app.config.Metrics.Port),
-        GinMode: app.config.Metrics.GinMode,
-    })
+    errChan := make(chan error, 16)
 
     //main
-    server := newServer(app)
     apiClient := newApiClient(app.config.GrpcCfg.ApiClientHost + ":" + app.config.GrpcCfg.ApiClientPort)
     period := time.NewTicker(app.checkPeriod * time.Hour)
+    defer period.Stop()
 
     sender := make(chan shared.TaskResult, app.numCPUs*10)
     defer close(sender)
@@ -82,11 +88,11 @@ func (app *App) Run() {
     handlers := make(map[string]parsers.TaskHandler)
     if app.config.OriginEnabled.Surugaya {
-        handlers[shared.OriginSurugaya] = parsers.NewSurugayaParser(ctx, surugayaScrapper)
+        handlers[shared.OriginSurugaya] = parsers.NewSurugayaParser(surugayaScrapper)
     }
     if app.config.OriginEnabled.Mandarake {
-        handlers[shared.OriginMandarake] = mandarake.NewParser(mandarake.ParserDeps{
+        handlers[shared.OriginMandarake] = mandarake.NewParser(mandarake.Deps{
             Enabled:          app.config.OriginEnabled.Mandarake,
             ExternalBrowser:  app.config.ExternalBrowser,
             GoroutinesNumber: app.numCPUs,
@@ -108,7 +114,7 @@ func (app *App) Run() {
         receivedTasks := app.network.RequestTasks(ctx, apiClient)
         log.WithField("length", len(receivedTasks)).Debug("End receiving")
-        taskProcessor.StartWork(receivedTasks)
+        taskProcessor.StartWork(ctx, receivedTasks)
     }
 
     go func() {
@@ -143,37 +149,38 @@ func (app *App) Run() {
     //start metrics server
     go func() {
-        if err := mSrv.Run(); err != nil {
-            log.WithError(err).Error("Metrics server run failed")
+        if err := app.metricsSrv.Run(); err != nil {
+            errChan <- err
         }
     }()
 
     //gRPC Server for status response
     go func() {
-        listener, err := net.Listen("tcp", app.config.GrpcCfg.ServerHost+":"+app.config.GrpcCfg.ServerPort)
+        listener, err := net.Listen("tcp", addr)
         if err != nil {
-            log.Fatalf("failed to listen: %v", err)
+            errChan <- err
         }
-        log.Infof("gRPC Server listening at %v", app.config.GrpcCfg.ServerHost+":"+app.config.GrpcCfg.ServerPort)
-        if err := server.Serve(listener); err != nil {
-            log.Fatalf("failed to serve: %v", err)
+        log.Infof("gRPC Server listening at %v", addr)
+        if err = app.taskApiSrv.Serve(listener); err != nil {
+            errChan <- err
         }
     }()
 
-    go func() {
-        sigint := make(chan os.Signal, 1)
-        signal.Notify(sigint, os.Interrupt, syscall.SIGTERM)
-        <-sigint
-        log.Info("Shutting down...")
-        period.Stop()
-        server.GracefulStop()
-        cancel()
-        if err := mSrv.Shutdown(ctx); err != nil {
-            log.WithError(err).Error("Failed to shutdown server")
-        }
-    }()
-    <-ctx.Done()
+    select {
+    case <-ctx.Done():
+        app.shutdown(ctx)
+    case err := <-errChan:
+        log.WithError(err).Fatal("Application run error")
+    }
+}
+
+func (app *App) shutdown(ctx context.Context) {
+    log.Info("Shutting down...")
+    app.taskApiSrv.GracefulStop()
+    if err := app.metricsSrv.Shutdown(ctx); err != nil {
+        log.WithError(err).Error("Failed to shutdown server")
+    }
 }
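The select at the end of Run is the supervision point: the first value out of errChan aborts the process, while context cancellation takes the graceful path. A self-contained sketch of that shape (names illustrative, not repository code); the buffered channel matters because goroutines may fail after Run has already returned:

package main

import (
    "context"
    "errors"
    "fmt"
)

// runLoop blocks until shutdown is requested or a background task fails.
func runLoop(ctx context.Context, errChan <-chan error) error {
    select {
    case <-ctx.Done():
        return ctx.Err() // graceful-shutdown path
    case err := <-errChan:
        return err // first background failure wins
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    // buffered, so late failures never block their goroutines on send
    errChan := make(chan error, 16)
    go func() { errChan <- errors.New("listener failed") }()
    fmt.Println(runLoop(ctx, errChan)) // prints "listener failed"
}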

View file

@@ -12,10 +12,10 @@ type Server struct {
     state *appState.State
 }
 
-func newServer(app *App) *grpc.Server {
+func newServer(state *appState.State) *grpc.Server {
     s := grpc.NewServer()
     srv := &Server{
-        state: app.state,
+        state: state,
     }
     pb.RegisterTaskProcessorServer(s, srv)
     return s

View file

@@ -1,10 +1,11 @@
 package parsers
 
 import (
+    "context"
     "task-processor/internal/appState"
     "task-processor/internal/shared"
 )
 
 type TaskHandler interface {
-    HandleTasks(tasks []shared.Task, sender chan shared.TaskResult, state *appState.State)
+    HandleTasks(ctx context.Context, tasks []shared.Task, sender chan shared.TaskResult, state *appState.State)
 }
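Any type implementing TaskHandler now receives the run context as its first argument. A hypothetical no-op implementation (NopParser is illustrative, not part of the repository) showing how a handler can honour cancellation:

type NopParser struct{}

func (NopParser) HandleTasks(ctx context.Context, tasks []shared.Task, sender chan shared.TaskResult, state *appState.State) {
    for _, task := range tasks {
        select {
        case <-ctx.Done():
            return // shutdown requested: stop without draining the batch
        case sender <- shared.TaskResult{MerchUuid: task.MerchUuid, Origin: task.Origin}:
        }
    }
}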

View file

@@ -4,15 +4,20 @@ import (
     "context"
     "github.com/chromedp/chromedp"
     log "github.com/sirupsen/logrus"
+    "runtime"
     "sync"
     "task-processor/internal/appState"
     "task-processor/internal/shared"
 )
 
-func (s *Parser) HandleTasks(tasks []shared.Task, sender chan shared.TaskResult, state *appState.State) {
-    log.Debug(logHeader + logWorker + "handling tasks")
+func (s *Parser) HandleTasks(ctx context.Context, tasks []shared.Task, sender chan shared.TaskResult, state *appState.State) {
+    log.Infof("%v %v handling tasks", logHeader, logWorker)
 
-    allocCtx, allocCancel := chromedp.NewRemoteAllocator(s.baseCtx, s.externalBrowser)
+    allocCtx, allocCancel := chromedp.NewRemoteAllocator(ctx, s.externalBrowser)
     defer allocCancel()
+    sessionCtx, sessionCancel := chromedp.NewContext(allocCtx /* chromedp.WithLogf(log.Printf) */, chromedp.WithLogf(func(string, ...any) {}))
+    defer sessionCancel()
 
     receiver := make(chan shared.Task, len(tasks))
     for _, task := range tasks {
@@ -20,41 +25,27 @@ func (s *Parser) HandleTasks(tasks []shared.Task, sender chan shared.TaskResult,
     }
     close(receiver)
 
+    log.Debugf("%v goroutines before wait group: %v", logHeader, runtime.NumGoroutine())
     wg := sync.WaitGroup{}
     for i := 0; i < s.goroutinesNumber; i++ {
         wg.Add(1)
         go func() {
             defer wg.Done()
-            s.worker(allocCtx, receiver, sender, state)
+            s.worker(sessionCtx, receiver, sender)
         }()
     }
     wg.Wait()
-    allocCancel()
-    log.Debug(logHeader + logWorker + "finished handling tasks")
+    log.Debugf("%v goroutines after wait group: %v", logHeader, runtime.NumGoroutine())
+    log.Infof(logHeader + logWorker + "finished handling tasks")
 }
 
-func (s *Parser) worker(ctx context.Context, receiver chan shared.Task, sender chan shared.TaskResult, state *appState.State) {
+func (s *Parser) worker(ctx context.Context, receiver chan shared.Task, sender chan shared.TaskResult) {
     for task := range receiver {
-        log.WithField("task_uuid", task.MerchUuid).Debug(logHeader + logWorker + "processing task")
-
-        //pageCtx, pageCancel := chromedp.NewContext(ctx, chromedp.WithLogf(func(string, ...any) {}))
-        //
-        //price, err := s.getPrice(pageCtx, task)
-        //pageCancel()
-        //price, err := s.getMinimalPrice(task)
-        //if err != nil {
-        //    log.WithField("task_uuid", task.MerchUuid).Warn(logHeader + logWorker + logTaskWarning + "failed to process, zero price")
-        //    sender <- shared.TaskResult{
-        //        MerchUuid: task.MerchUuid,
-        //        Origin:    task.Origin,
-        //        Price:     zeroPrice,
-        //    }
-        //    continue
-        //}
-        price := s.getMinimalPrice(task)
+        log.WithField("task_uuid", task.MerchUuid).Infof("%v %v processing task", logHeader, logWorker)
+        //price will be zeroPrice value in case of any error or if price not found
+        price := s.getMinimalPrice(ctx, task)
         sender <- shared.TaskResult{
             MerchUuid: task.MerchUuid,
             Origin:    task.Origin,
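With this change all Mandarake workers share one chromedp session context derived from the caller's ctx, instead of each price lookup dialing the remote browser itself. A self-contained sketch of that shape; the WebSocket address and example URL are placeholders, not this project's configuration:

package main

import (
    "context"
    "fmt"

    "github.com/chromedp/chromedp"
)

// pageTitle receives the shared session context instead of creating its own.
func pageTitle(sessionCtx context.Context, url string) (string, error) {
    var title string
    err := chromedp.Run(sessionCtx,
        chromedp.Navigate(url),
        chromedp.Title(&title),
    )
    return title, err
}

func main() {
    // one allocator and one session for the whole batch of lookups
    allocCtx, allocCancel := chromedp.NewRemoteAllocator(context.Background(), "ws://browser:9222")
    defer allocCancel()
    sessionCtx, sessionCancel := chromedp.NewContext(allocCtx)
    defer sessionCancel()

    title, err := pageTitle(sessionCtx, "https://example.com")
    fmt.Println(title, err)
}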

View file

@@ -1,39 +1,36 @@
 package mandarake
 
 import (
-    "context"
     log "github.com/sirupsen/logrus"
 )
 
 const (
     zeroPrice     int32   = 0
     taxMultiplier float64 = 1.1
 
-    logHeader      = "Mandarake parser | "
-    logWorker      = "worker: "
-    logTaskWarning = "task warning: "
-    logGetPrice    = "get price: "
+    logHeader      = "Mandarake parser |"
+    logWorker      = "worker:"
+    logTaskWarning = "task warning:"
+    logGetPrice    = "get price:"
 )
 
 type Parser struct {
-    baseCtx          context.Context
     externalBrowser  string
     goroutinesNumber int
 }
 
-type ParserDeps struct {
+type Deps struct {
     Enabled          bool
     ExternalBrowser  string
     GoroutinesNumber int
 }
 
-func NewParser(deps ParserDeps) *Parser {
+func NewParser(deps Deps) *Parser {
     if !deps.Enabled {
         log.Info(logHeader + "disabled")
         return nil
     }
     return &Parser{
-        baseCtx:          context.Background(),
         externalBrowser:  deps.ExternalBrowser,
         goroutinesNumber: deps.GoroutinesNumber,
     }
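Dropping baseCtx follows the standard guidance from the Go context package docs: do not store a Context inside a struct type; pass it explicitly to each function that needs it, so the caller (here, Run) controls cancellation per call. An illustrative contrast, where neither type is repository code:

// discouraged: the context is frozen at construction time
type parserWithCtx struct{ ctx context.Context }

// preferred: each call receives the caller's live context
type parser struct{}

func (parser) HandleTasks(ctx context.Context) error { return ctx.Err() }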

View file

@@ -41,14 +41,7 @@ func (s *Parser) getPrice(ctx context.Context, task shared.Task) (int32, error)
     return minimal, nil
 }
 
-func (s *Parser) getMinimalPrice(task shared.Task) int32 {
-    ctx := context.Background()
-    allocCtx, allocCancel := chromedp.NewRemoteAllocator(ctx, s.externalBrowser)
-    defer allocCancel()
-    sessionCtx, sessionCancel := chromedp.NewContext(allocCtx, chromedp.WithLogf(log.Printf))
-    defer sessionCancel()
-
+func (s *Parser) getMinimalPrice(sessionCtx context.Context, task shared.Task) int32 {
     var (
         singlePrice string
         rangedPrice string

View file

@@ -65,7 +65,6 @@ func TestParser_processPrices(t *testing.T) {
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
             s := &Parser{
-                baseCtx:          tt.fields.baseCtx,
                 externalBrowser:  tt.fields.externalBrowser,
                 goroutinesNumber: tt.fields.goroutinesNumber,
             }

View file

@@ -11,21 +11,19 @@ import (
 type SurugayaParser struct {
     scrapper sc.SurugayaScrapperClient
-    ctx      context.Context
 }
 
-func NewSurugayaParser(ctx context.Context, scrapper sc.SurugayaScrapperClient) *SurugayaParser {
+func NewSurugayaParser(scrapper sc.SurugayaScrapperClient) *SurugayaParser {
     log.Debug("Surugaya parser init")
     return &SurugayaParser{
         scrapper: scrapper,
-        ctx:      ctx,
     }
 }
 
-func (s *SurugayaParser) HandleTasks(tasks []shared.Task, sender chan shared.TaskResult, state *appState.State) {
+func (s *SurugayaParser) HandleTasks(ctx context.Context, tasks []shared.Task, sender chan shared.TaskResult, state *appState.State) {
     log.WithField("count", len(tasks)).Debug("Handling Surugaya Tasks")
 
-    stream, err := s.scrapper.ProcessTasks(s.ctx)
+    stream, err := s.scrapper.ProcessTasks(ctx)
     if err != nil {
         log.WithField("err", err).Error("Error creating stream")
         return
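Using the run context here (rather than the ctx stored at construction) means an in-flight ProcessTasks stream is torn down as soon as the application shuts down. The effect is plain context behaviour; a self-contained illustration with a stand-in for the stream setup, not the generated gRPC client:

package main

import (
    "context"
    "fmt"
    "time"
)

// openStream stands in for a gRPC call that respects its context.
func openStream(ctx context.Context) error {
    select {
    case <-time.After(5 * time.Second): // pretend dial/stream latency
        return nil
    case <-ctx.Done():
        return ctx.Err() // run context cancelled: abort immediately
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    fmt.Println(openStream(ctx)) // context deadline exceeded
}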

View file

@@ -1,13 +1,14 @@
 package processor
 
 import (
+    "context"
     log "github.com/sirupsen/logrus"
     "sync"
     "task-processor/internal/appState"
     "task-processor/internal/shared"
 )
 
-func (p *Processor) StartWork(receivedTasks []shared.TaskResponse) {
+func (p *Processor) StartWork(ctx context.Context, receivedTasks []shared.TaskResponse) {
     log.Info("Starting work...")
 
     p.state.ResetCounters()
@@ -27,7 +28,7 @@ func (p *Processor) StartWork(receivedTasks []shared.TaskResponse) {
         go func(origin string, tasks []shared.Task) {
             defer wg.Done()
             log.Info("Running task handler for origin: ", origin)
-            p.handlers[origin].HandleTasks(tasks, p.out, p.state)
+            p.handlers[origin].HandleTasks(ctx, tasks, p.out, p.state)
         }(origin, tasks)
     }
     wg.Wait()
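StartWork fans out one goroutine per origin, and every handler now shares the same run context. A self-contained sketch of that fan-out shape; the origins and task payloads are placeholders:

package main

import (
    "context"
    "fmt"
    "sync"
)

func startWork(ctx context.Context, work map[string][]string) {
    var wg sync.WaitGroup
    for origin, tasks := range work {
        wg.Add(1)
        go func(origin string, tasks []string) {
            defer wg.Done()
            for range tasks {
                if ctx.Err() != nil {
                    return // shutdown requested: stop this origin early
                }
            }
            fmt.Println("finished origin:", origin)
        }(origin, tasks)
    }
    wg.Wait() // startWork returns only after every origin is done
}

func main() {
    startWork(context.Background(), map[string][]string{
        "surugaya":  {"t1", "t2"},
        "mandarake": {"t3"},
    })
}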