
Add Graceful shutdown for Windows and hooks for shutdown of goroutines (#8964)

* Graceful shutdown for Windows and others

Restructures modules/graceful, adding graceful shutdown for Windows and
removing and replacing the old minwinsvc code.

Creates a new wait group, terminate, which allows goroutines to finish up
after the servers have shut down.

Shutdown and terminate hooks are added for goroutines (see the usage sketch
after this list).

* Remove unused functions - these can be added in a different PR

* Add startup timeout functionality

* Document STARTUP_TIMEOUT
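
For illustration only, and not part of this diff: a background goroutine can honour the new channels exposed by graceful.Manager roughly as below. Only the graceful calls come from this commit; the worker package, queue, process and flush pieces are hypothetical.

package worker // hypothetical consumer of the graceful package

import "code.gitea.io/gitea/modules/graceful"

// runWorker is a hypothetical helper: it processes items from workQueue until
// the graceful manager signals shutdown, then flushes and lets the goroutine exit.
func runWorker(workQueue <-chan string, process func(string), flush func()) {
	go func() {
		for {
			select {
			case <-graceful.Manager.IsShutdown():
				// Shutdown has been requested: stop taking new work and clean up.
				flush()
				return
			case item := <-workQueue:
				process(item)
			}
		}
	}()
}

Goroutines that must keep working until every server has stopped can select on graceful.Manager.IsTerminate() instead, which only closes once all running servers have finished. The new STARTUP_TIMEOUT setting (setting.StartupTimeout) is consumed by the managers below: on non-Windows it triggers a shutdown if the expected listeners have not all been claimed in time, and on Windows it bounds how long the service Execute loop waits before reporting a failed start.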
Authored by zeripath on 2019-11-21 18:32:02 +00:00; committed by techknowlogick
parent d7ac9727bb
commit cbaa1de9ec
30 changed files with 666 additions and 497 deletions

View file

@@ -1,40 +0,0 @@
// +build !windows
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package graceful
import "sync"
var cleanupWaitGroup sync.WaitGroup
func init() {
cleanupWaitGroup = sync.WaitGroup{}
// There are three places that could inherit sockets:
//
// * HTTP or HTTPS main listener
// * HTTP redirection fallback
// * SSH
//
// If you add an additional place you must increment this number
// and add a function to call InformCleanup if it's not going to be used
cleanupWaitGroup.Add(3)
// Wait till we're done getting all of the listeners and then close
// the unused ones
go func() {
cleanupWaitGroup.Wait()
// Ignore the error here there's not much we can do with it
// They're logged in the CloseProvidedListeners function
_ = CloseProvidedListeners()
}()
}
// InformCleanup tells the cleanup wait group that we have either taken a listener
// or will not be taking a listener
func InformCleanup() {
cleanupWaitGroup.Done()
}

View file

@@ -1,16 +0,0 @@
// +build windows
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// This code is heavily inspired by the archived gofacebook/gracenet/net.go handler
package graceful
// This file contains shims for windows builds
const IsChild = false
// WaitForServers waits for all running servers to finish
func WaitForServers() {
}

modules/graceful/manager.go (new file, 187 lines)
View file

@@ -0,0 +1,187 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package graceful
import (
"time"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
)
type state uint8
const (
stateInit state = iota
stateRunning
stateShuttingDown
stateTerminate
)
// There are three places that could inherit sockets:
//
// * HTTP or HTTPS main listener
// * HTTP redirection fallback
// * SSH
//
// If you add an additional place you must increment this number
// and add a function to call manager.InformCleanup if it's not going to be used
const numberOfServersToCreate = 3
// Manager represents the graceful server manager interface
var Manager *gracefulManager
func init() {
Manager = newGracefulManager()
}
func (g *gracefulManager) doShutdown() {
if !g.setStateTransition(stateRunning, stateShuttingDown) {
return
}
g.lock.Lock()
close(g.shutdown)
g.lock.Unlock()
if setting.GracefulHammerTime >= 0 {
go g.doHammerTime(setting.GracefulHammerTime)
}
go func() {
g.WaitForServers()
<-time.After(1 * time.Second)
g.doTerminate()
}()
}
func (g *gracefulManager) doHammerTime(d time.Duration) {
time.Sleep(d)
select {
case <-g.hammer:
default:
log.Warn("Setting Hammer condition")
close(g.hammer)
}
}
func (g *gracefulManager) doTerminate() {
if !g.setStateTransition(stateShuttingDown, stateTerminate) {
return
}
g.lock.Lock()
close(g.terminate)
g.lock.Unlock()
}
// IsChild returns if the current process is a child of previous Gitea process
func (g *gracefulManager) IsChild() bool {
return g.isChild
}
// IsShutdown returns a channel which will be closed at shutdown.
// The order of closure is IsShutdown, IsHammer (potentially), IsTerminate
func (g *gracefulManager) IsShutdown() <-chan struct{} {
g.lock.RLock()
if g.shutdown == nil {
g.lock.RUnlock()
g.lock.Lock()
if g.shutdown == nil {
g.shutdown = make(chan struct{})
}
defer g.lock.Unlock()
return g.shutdown
}
defer g.lock.RUnlock()
return g.shutdown
}
// IsHammer returns a channel which will be closed at hammer
// The order of closure is IsShutdown, IsHammer (potentially), IsTerminate
// Servers running within the running server wait group should respond to IsHammer
// if not shutdown already
func (g *gracefulManager) IsHammer() <-chan struct{} {
g.lock.RLock()
if g.hammer == nil {
g.lock.RUnlock()
g.lock.Lock()
if g.hammer == nil {
g.hammer = make(chan struct{})
}
defer g.lock.Unlock()
return g.hammer
}
defer g.lock.RUnlock()
return g.hammer
}
// IsTerminate returns a channel which will be closed at terminate
// The order of closure is IsShutdown, IsHammer (potentially), IsTerminate
// IsTerminate will only close once all running servers have stopped
func (g *gracefulManager) IsTerminate() <-chan struct{} {
g.lock.RLock()
if g.terminate == nil {
g.lock.RUnlock()
g.lock.Lock()
if g.terminate == nil {
g.terminate = make(chan struct{})
}
defer g.lock.Unlock()
return g.terminate
}
defer g.lock.RUnlock()
return g.terminate
}
// ServerDone declares a running server done and subtracts one from the
// running server wait group. Users probably do not want to call this
// and should use one of the RunWithShutdown* functions
func (g *gracefulManager) ServerDone() {
g.runningServerWaitGroup.Done()
}
// WaitForServers waits for all running servers to finish. Users should probably
// instead use AtTerminate or IsTerminate
func (g *gracefulManager) WaitForServers() {
g.runningServerWaitGroup.Wait()
}
// WaitForTerminate waits for all terminating actions to finish.
// Only the main go-routine should use this
func (g *gracefulManager) WaitForTerminate() {
g.terminateWaitGroup.Wait()
}
func (g *gracefulManager) getState() state {
g.lock.RLock()
defer g.lock.RUnlock()
return g.state
}
func (g *gracefulManager) setStateTransition(old, new state) bool {
if old != g.getState() {
return false
}
g.lock.Lock()
if g.state != old {
g.lock.Unlock()
return false
}
g.state = new
g.lock.Unlock()
return true
}
func (g *gracefulManager) setState(st state) {
g.lock.Lock()
defer g.lock.Unlock()
g.state = st
}
// InformCleanup tells the cleanup wait group that we have either taken a listener
// or will not be taking a listener
func (g *gracefulManager) InformCleanup() {
g.createServerWaitGroup.Done()
}
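
A further sketch, not code from this commit, of how the single main goroutine might sequence process exit from outside the graceful package using the phases defined above:

	<-graceful.Manager.IsShutdown()     // a shutdown has been requested
	<-graceful.Manager.IsTerminate()    // closes only once all running servers have stopped
	graceful.Manager.WaitForTerminate() // terminate-phase goroutines have drained

The split is deliberate per the comments above: the Is* channels can be watched by any goroutine, while WaitForTerminate (like WaitForServers) is meant for the one goroutine that owns process exit.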

View file

@@ -0,0 +1,141 @@
// +build !windows
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package graceful
import (
"errors"
"os"
"os/signal"
"sync"
"syscall"
"time"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
)
type gracefulManager struct {
isChild bool
forked bool
lock *sync.RWMutex
state state
shutdown chan struct{}
hammer chan struct{}
terminate chan struct{}
runningServerWaitGroup sync.WaitGroup
createServerWaitGroup sync.WaitGroup
terminateWaitGroup sync.WaitGroup
}
func newGracefulManager() *gracefulManager {
manager := &gracefulManager{
isChild: len(os.Getenv(listenFDs)) > 0 && os.Getppid() > 1,
lock: &sync.RWMutex{},
}
manager.createServerWaitGroup.Add(numberOfServersToCreate)
manager.Run()
return manager
}
func (g *gracefulManager) Run() {
g.setState(stateRunning)
go g.handleSignals()
c := make(chan struct{})
go func() {
defer close(c)
// Wait till we're done getting all of the listeners and then close
// the unused ones
g.createServerWaitGroup.Wait()
// Ignore the error here there's not much we can do with it
// They're logged in the CloseProvidedListeners function
_ = CloseProvidedListeners()
}()
if setting.StartupTimeout > 0 {
go func() {
select {
case <-c:
return
case <-g.IsShutdown():
return
case <-time.After(setting.StartupTimeout):
log.Error("Startup took too long! Shutting down")
g.doShutdown()
}
}()
}
}
func (g *gracefulManager) handleSignals() {
var sig os.Signal
signalChannel := make(chan os.Signal, 1)
signal.Notify(
signalChannel,
syscall.SIGHUP,
syscall.SIGUSR1,
syscall.SIGUSR2,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGTSTP,
)
pid := syscall.Getpid()
for {
sig = <-signalChannel
switch sig {
case syscall.SIGHUP:
if setting.GracefulRestartable {
log.Info("PID: %d. Received SIGHUP. Forking...", pid)
err := g.doFork()
if err != nil && err.Error() != "another process already forked. Ignoring this one" {
log.Error("Error whilst forking from PID: %d : %v", pid, err)
}
} else {
log.Info("PID: %d. Received SIGHUP. Not set restartable. Shutting down...", pid)
g.doShutdown()
}
case syscall.SIGUSR1:
log.Info("PID %d. Received SIGUSR1.", pid)
case syscall.SIGUSR2:
log.Warn("PID %d. Received SIGUSR2. Hammering...", pid)
g.doHammerTime(0 * time.Second)
case syscall.SIGINT:
log.Warn("PID %d. Received SIGINT. Shutting down...", pid)
g.doShutdown()
case syscall.SIGTERM:
log.Warn("PID %d. Received SIGTERM. Shutting down...", pid)
g.doShutdown()
case syscall.SIGTSTP:
log.Info("PID %d. Received SIGTSTP.", pid)
default:
log.Info("PID %d. Received %v.", pid, sig)
}
}
}
func (g *gracefulManager) doFork() error {
g.lock.Lock()
if g.forked {
g.lock.Unlock()
return errors.New("another process already forked. Ignoring this one")
}
g.forked = true
g.lock.Unlock()
// We need to move the file logs to append pids
setting.RestartLogsWithPIDSuffix()
_, err := RestartProcess()
return err
}
func (g *gracefulManager) RegisterServer() {
KillParent()
g.runningServerWaitGroup.Add(1)
}

View file

@@ -0,0 +1,162 @@
// +build windows
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// This code is heavily inspired by the archived gofacebook/gracenet/net.go handler
package graceful
import (
"os"
"strconv"
"sync"
"time"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/debug"
)
var WindowsServiceName = "gitea"
const (
hammerCode = 128
hammerCmd = svc.Cmd(hammerCode)
acceptHammerCode = svc.Accepted(hammerCode)
)
type gracefulManager struct {
isChild bool
lock *sync.RWMutex
state state
shutdown chan struct{}
hammer chan struct{}
terminate chan struct{}
runningServerWaitGroup sync.WaitGroup
createServerWaitGroup sync.WaitGroup
terminateWaitGroup sync.WaitGroup
}
func newGracefulManager() *gracefulManager {
manager := &gracefulManager{
isChild: false,
lock: &sync.RWMutex{},
}
manager.createServerWaitGroup.Add(numberOfServersToCreate)
manager.Run()
return manager
}
func (g *gracefulManager) Run() {
g.setState(stateRunning)
if skip, _ := strconv.ParseBool(os.Getenv("SKIP_MINWINSVC")); skip {
return
}
run := svc.Run
isInteractive, err := svc.IsAnInteractiveSession()
if err != nil {
log.Error("Unable to ascertain if running as an Interactive Session: %v", err)
return
}
if isInteractive {
run = debug.Run
}
go run(WindowsServiceName, g)
}
// Execute makes gracefulManager implement svc.Handler
func (g *gracefulManager) Execute(args []string, changes <-chan svc.ChangeRequest, status chan<- svc.Status) (svcSpecificEC bool, exitCode uint32) {
if setting.StartupTimeout > 0 {
status <- svc.Status{State: svc.StartPending}
} else {
status <- svc.Status{State: svc.StartPending, WaitHint: uint32(setting.StartupTimeout/time.Millisecond)}
}
// Now need to wait for everything to start...
if !g.awaitServer(setting.StartupTimeout) {
return false, 1
}
// We need to implement some way of svc.AcceptParamChange/svc.ParamChange
status <- svc.Status{
State: svc.Running,
Accepts: svc.AcceptStop | svc.AcceptShutdown | acceptHammerCode,
}
waitTime := 30 * time.Second
loop:
for change := range changes {
switch change.Cmd {
case svc.Interrogate:
status <- change.CurrentStatus
case svc.Stop, svc.Shutdown:
g.doShutdown()
waitTime += setting.GracefulHammerTime
break loop
case hammerCode:
g.doShutdown()
g.doHammerTime(0 * time.Second)
break loop
default:
log.Debug("Unexpected control request: %v", change.Cmd)
}
}
status <- svc.Status{
State: svc.StopPending,
WaitHint: uint32(waitTime/time.Millisecond),
}
hammerLoop:
for {
select {
case change := <-changes:
switch change.Cmd {
case svc.Interrogate:
status <- change.CurrentStatus
case svc.Stop, svc.Shutdown, hammerCmd:
g.doHammerTime(0 * time.Second)
break hammerLoop
default:
log.Debug("Unexpected control request: %v", change.Cmd)
}
case <-g.hammer:
break hammerLoop
}
}
return false, 0
}
func (g *gracefulManager) RegisterServer() {
g.runningServerWaitGroup.Add(1)
}
func (g *gracefulManager) awaitServer(limit time.Duration) bool {
c := make(chan struct{})
go func() {
defer close(c)
g.createServerWaitGroup.Wait()
}()
if limit > 0 {
select {
case <-c:
return true // completed normally
case <-time.After(limit):
return false // timed out
case <-g.IsShutdown():
return false
}
} else {
select {
case <-c:
return true // completed normally
case <-g.IsShutdown():
return false
}
}
}

View file

@@ -100,7 +100,7 @@ func CloseProvidedListeners() error {
// creates a new one using net.Listen.
func GetListener(network, address string) (net.Listener, error) {
// Add a deferral to say that we've tried to grab a listener
defer InformCleanup()
defer Manager.InformCleanup()
switch network {
case "tcp", "tcp4", "tcp6":
tcpAddr, err := net.ResolveTCPAddr(network, address)

View file

@@ -0,0 +1,19 @@
// +build windows
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// This code is heavily inspired by the archived gofacebook/gracenet/net.go handler
package graceful
import "net"
// GetListener obtains a listener for the local network address.
// On windows this is basically just a shim around net.Listen.
func GetListener(network, address string) (net.Listener, error) {
// Add a deferral to say that we've tried to grab a listener
defer Manager.InformCleanup()
return net.Listen(network, address)
}

View file

@@ -21,7 +21,7 @@ var killParent sync.Once
// KillParent sends the kill signal to the parent process if we are a child
func KillParent() {
killParent.Do(func() {
if IsChild {
if Manager.IsChild() {
ppid := syscall.Getppid()
if ppid > 1 {
_ = syscall.Kill(ppid, syscall.SIGTERM)
@@ -79,7 +79,3 @@ func RestartProcess() (int, error) {
}
return process.Pid, nil
}
type filer interface {
File() (*os.File, error)
}

View file

@@ -1,5 +1,3 @@
// +build !windows
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
@@ -19,37 +17,16 @@ import (
"code.gitea.io/gitea/modules/log"
)
type state uint8
const (
stateInit state = iota
stateRunning
stateShuttingDown
stateTerminate
)
var (
// RWMutex for when adding servers or shutting down
runningServerReg sync.RWMutex
runningServerWG sync.WaitGroup
// ensure we only fork once
runningServersForked bool
// DefaultReadTimeOut default read timeout
DefaultReadTimeOut time.Duration
// DefaultWriteTimeOut default write timeout
DefaultWriteTimeOut time.Duration
// DefaultMaxHeaderBytes default max header bytes
DefaultMaxHeaderBytes int
// IsChild reports if we are a fork iff LISTEN_FDS is set and our parent PID is not 1
IsChild = len(os.Getenv(listenFDs)) > 0 && os.Getppid() > 1
)
func init() {
runningServerReg = sync.RWMutex{}
runningServerWG = sync.WaitGroup{}
DefaultMaxHeaderBytes = 0 // use http.DefaultMaxHeaderBytes - which currently is 1 << 20 (1MB)
}
@@ -58,43 +35,29 @@ type ServeFunction = func(net.Listener) error
// Server represents our graceful server
type Server struct {
network string
address string
listener net.Listener
PreSignalHooks map[os.Signal][]func()
PostSignalHooks map[os.Signal][]func()
wg sync.WaitGroup
sigChan chan os.Signal
state state
lock *sync.RWMutex
BeforeBegin func(network, address string)
OnShutdown func()
}
// WaitForServers waits for all running servers to finish
func WaitForServers() {
runningServerWG.Wait()
network string
address string
listener net.Listener
wg sync.WaitGroup
state state
lock *sync.RWMutex
BeforeBegin func(network, address string)
OnShutdown func()
}
// NewServer creates a server on network at provided address
func NewServer(network, address string) *Server {
runningServerReg.Lock()
defer runningServerReg.Unlock()
if IsChild {
if Manager.IsChild() {
log.Info("Restarting new server: %s:%s on PID: %d", network, address, os.Getpid())
} else {
log.Info("Starting new server: %s:%s on PID: %d", network, address, os.Getpid())
}
srv := &Server{
wg: sync.WaitGroup{},
sigChan: make(chan os.Signal),
PreSignalHooks: map[os.Signal][]func(){},
PostSignalHooks: map[os.Signal][]func(){},
state: stateInit,
lock: &sync.RWMutex{},
network: network,
address: address,
wg: sync.WaitGroup{},
state: stateInit,
lock: &sync.RWMutex{},
network: network,
address: address,
}
srv.BeforeBegin = func(network, addr string) {
@@ -107,7 +70,7 @@ func NewServer(network, address string) *Server {
// ListenAndServe listens on the provided network address and then calls Serve
// to handle requests on incoming connections.
func (srv *Server) ListenAndServe(serve ServeFunction) error {
go srv.handleSignals()
go srv.awaitShutdown()
l, err := GetListener(srv.network, srv.address)
if err != nil {
@@ -117,8 +80,6 @@ func (srv *Server) ListenAndServe(serve ServeFunction) error {
srv.listener = newWrappedListener(l, srv)
KillParent()
srv.BeforeBegin(srv.network, srv.address)
return srv.Serve(serve)
@@ -150,7 +111,7 @@ func (srv *Server) ListenAndServeTLS(certFile, keyFile string, serve ServeFunction) error {
// ListenAndServeTLSConfig listens on the provided network address and then calls
// Serve to handle requests on incoming TLS connections.
func (srv *Server) ListenAndServeTLSConfig(tlsConfig *tls.Config, serve ServeFunction) error {
go srv.handleSignals()
go srv.awaitShutdown()
l, err := GetListener(srv.network, srv.address)
if err != nil {
@@ -161,7 +122,6 @@ func (srv *Server) ListenAndServeTLSConfig(tlsConfig *tls.Config, serve ServeFunction) error {
wl := newWrappedListener(l, srv)
srv.listener = tls.NewListener(wl, tlsConfig)
KillParent()
srv.BeforeBegin(srv.network, srv.address)
return srv.Serve(serve)
@@ -178,12 +138,12 @@ func (srv *Server) ListenAndServeTLSConfig(tlsConfig *tls.Config, serve ServeFunction) error {
func (srv *Server) Serve(serve ServeFunction) error {
defer log.Debug("Serve() returning... (PID: %d)", syscall.Getpid())
srv.setState(stateRunning)
runningServerWG.Add(1)
Manager.RegisterServer()
err := serve(srv.listener)
log.Debug("Waiting for connections to finish... (PID: %d)", syscall.Getpid())
srv.wg.Wait()
srv.setState(stateTerminate)
runningServerWG.Done()
Manager.ServerDone()
// use of closed means that the listeners are closed - i.e. we should be shutting down - return nil
if err != nil && strings.Contains(err.Error(), "use of closed") {
return nil
@@ -205,6 +165,10 @@ func (srv *Server) setState(st state) {
srv.state = st
}
type filer interface {
File() (*os.File, error)
}
type wrappedListener struct {
net.Listener
stopped bool

View file

@@ -1,5 +1,3 @@
// +build !windows
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
@@ -7,29 +5,37 @@
package graceful
import (
"errors"
"fmt"
"os"
"runtime"
"time"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
)
// awaitShutdown waits for the shutdown signal from the Manager
func (srv *Server) awaitShutdown() {
select {
case <-Manager.IsShutdown():
// Shutdown
srv.doShutdown()
case <-Manager.IsHammer():
// Hammer
srv.doShutdown()
srv.doHammer()
}
<-Manager.IsHammer()
srv.doHammer()
}
// shutdown closes the listener so that no new connections are accepted
// and starts a goroutine that will hammer (stop all running requests) the server
// after setting.GracefulHammerTime.
func (srv *Server) shutdown() {
func (srv *Server) doShutdown() {
// only shutdown if we're running.
if srv.getState() != stateRunning {
return
}
srv.setState(stateShuttingDown)
if setting.GracefulHammerTime >= 0 {
go srv.hammerTime(setting.GracefulHammerTime)
}
if srv.OnShutdown != nil {
srv.OnShutdown()
@@ -42,14 +48,7 @@ func (srv *Server) shutdown() {
}
}
// hammerTime forces the server to shutdown in a given timeout - whether it
// finished outstanding requests or not. if Read/WriteTimeout are not set or the
// max header size is very big a connection could hang...
//
// srv.Serve() will not return until all connections are served. this will
// unblock the srv.wg.Wait() in Serve() thus causing ListenAndServe* functions to
// return.
func (srv *Server) hammerTime(d time.Duration) {
func (srv *Server) doHammer() {
defer func() {
// We call srv.wg.Done() until it panics.
// This happens if we call Done() when the WaitGroup counter is already at 0
@@ -62,7 +61,6 @@ func (srv *Server) hammerTime(d time.Duration) {
if srv.getState() != stateShuttingDown {
return
}
time.Sleep(d)
log.Warn("Forcefully shutting down parent")
for {
if srv.getState() == stateTerminate {
@@ -74,48 +72,3 @@ func (srv *Server) hammerTime(d time.Duration) {
runtime.Gosched()
}
}
func (srv *Server) fork() error {
runningServerReg.Lock()
defer runningServerReg.Unlock()
// only one server instance should fork!
if runningServersForked {
return errors.New("another process already forked. Ignoring this one")
}
runningServersForked = true
// We need to move the file logs to append pids
setting.RestartLogsWithPIDSuffix()
_, err := RestartProcess()
return err
}
// RegisterPreSignalHook registers a function to be run before the signal handler for
// a given signal. These are not mutex locked and should therefore be only called before Serve.
func (srv *Server) RegisterPreSignalHook(sig os.Signal, f func()) (err error) {
for _, s := range hookableSignals {
if s == sig {
srv.PreSignalHooks[sig] = append(srv.PreSignalHooks[sig], f)
return
}
}
err = fmt.Errorf("Signal %v is not supported", sig)
return
}
// RegisterPostSignalHook registers a function to be run after the signal handler for
// a given signal. These are not mutex locked and should therefore be only called before Serve.
func (srv *Server) RegisterPostSignalHook(sig os.Signal, f func()) (err error) {
for _, s := range hookableSignals {
if s == sig {
srv.PostSignalHooks[sig] = append(srv.PostSignalHooks[sig], f)
return
}
}
err = fmt.Errorf("Signal %v is not supported", sig)
return
}

View file

@@ -1,5 +1,3 @@
// +build !windows
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

View file

@@ -1,95 +0,0 @@
// +build !windows
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package graceful
import (
"os"
"os/signal"
"syscall"
"time"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
)
var hookableSignals []os.Signal
func init() {
hookableSignals = []os.Signal{
syscall.SIGHUP,
syscall.SIGUSR1,
syscall.SIGUSR2,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGTSTP,
}
}
// handleSignals listens for os Signals and calls any hooked in function that the
// user had registered with the signal.
func (srv *Server) handleSignals() {
var sig os.Signal
signal.Notify(
srv.sigChan,
hookableSignals...,
)
pid := syscall.Getpid()
for {
sig = <-srv.sigChan
srv.preSignalHooks(sig)
switch sig {
case syscall.SIGHUP:
if setting.GracefulRestartable {
log.Info("PID: %d. Received SIGHUP. Forking...", pid)
err := srv.fork()
if err != nil && err.Error() != "another process already forked. Ignoring this one" {
log.Error("Error whilst forking from PID: %d : %v", pid, err)
}
} else {
log.Info("PID: %d. Received SIGHUP. Not set restartable. Shutting down...", pid)
srv.shutdown()
}
case syscall.SIGUSR1:
log.Info("PID %d. Received SIGUSR1.", pid)
case syscall.SIGUSR2:
log.Warn("PID %d. Received SIGUSR2. Hammering...", pid)
srv.hammerTime(0 * time.Second)
case syscall.SIGINT:
log.Warn("PID %d. Received SIGINT. Shutting down...", pid)
srv.shutdown()
case syscall.SIGTERM:
log.Warn("PID %d. Received SIGTERM. Shutting down...", pid)
srv.shutdown()
case syscall.SIGTSTP:
log.Info("PID %d. Received SIGTSTP.")
default:
log.Info("PID %d. Received %v.", sig)
}
srv.postSignalHooks(sig)
}
}
func (srv *Server) preSignalHooks(sig os.Signal) {
if _, notSet := srv.PreSignalHooks[sig]; !notSet {
return
}
for _, f := range srv.PreSignalHooks[sig] {
f()
}
}
func (srv *Server) postSignalHooks(sig os.Signal) {
if _, notSet := srv.PostSignalHooks[sig]; !notSet {
return
}
for _, f := range srv.PostSignalHooks[sig] {
f()
}
}