Browse Source

- Some child processes may not have been shut down properly at
supervisor shutdown time.

Get rid of "delayprocesses" concept in favor of relying on process states.

Chris McDonough 17 years ago
parent
commit
806a061d42

+ 3 - 0
CHANGES.txt

@@ -228,6 +228,9 @@ Next Release
   - Fixed http://www.plope.com/software/collector/215 (bad error
     message in supervisorctl when program command not found on PATH).
 
+  - Some child processes may not have been shut down properly at
+    supervisor shutdown time.
+
 3.0a2
 
   - Fixed the README.txt example for defining the supervisor RPC

+ 0 - 4
src/supervisor/process.py

@@ -572,10 +572,6 @@ class ProcessGroupBase:
                 # BACKOFF -> FATAL
                 proc.give_up()
 
-    def get_delay_processes(self):
-        """ Processes which are starting or stopping """
-        return [ x for x in self.processes.values() if x.delay ]
-
     def get_unstopped_processes(self):
         """ Processes which aren't in a state that is considered 'stopped' """
         return [ x for x in self.processes.values() if x.get_state() not in

+ 13 - 13
src/supervisor/supervisord.py

@@ -56,7 +56,7 @@ from supervisor.states import getProcessStateDescription
 
 class Supervisor:
     stopping = False # set after we detect that we are handling a stop request
-    lastdelayreport = 0 # throttle for delayed process error reports at stop
+    lastshutdownreport = 0 # throttle for delayed process error reports at stop
     process_groups = None # map of process group name to process group object
     stop_groups = None # list used for priority ordered shutdown
 
@@ -118,26 +118,26 @@ class Supervisor:
             process_map.update(group.get_dispatchers())
         return process_map
 
-    def get_delay_processes(self):
-        delayprocs = []
+    def shutdown_report(self):
+        unstopped = []
 
         pgroups = self.process_groups.values()
         for group in pgroups:
-            delayprocs.extend(group.get_delay_processes())
+            unstopped.extend(group.get_unstopped_processes())
 
-        if delayprocs:
+        if unstopped:
             # throttle 'waiting for x to die' reports
             now = time.time()
-            if now > (self.lastdelayreport + 3): # every 3 secs
-                names = [ p.config.name for p in delayprocs]
+            if now > (self.lastshutdownreport + 3): # every 3 secs
+                names = [ p.config.name for p in unstopped ]
                 namestr = ', '.join(names)
                 self.options.logger.info('waiting for %s to die' % namestr)
-                self.lastdelayreport = now
-                for proc in delayprocs:
+                self.lastshutdownreport = now
+                for proc in unstopped:
                     state = getProcessStateDescription(proc.get_state())
                     self.options.logger.blather(
                         '%s state: %s' % (proc.config.name, state))
-        return delayprocs
+        return unstopped
 
     def ordered_stop_groups_phase_1(self):
         if self.stop_groups:
@@ -181,9 +181,9 @@ class Supervisor:
 
                 self.ordered_stop_groups_phase_1()
 
-                if not self.get_delay_processes():
-                    # if there are no delayed processes (we're done killing
-                    # everything), it's OK to stop or reload
+                if not self.shutdown_report():
+                    # if there are no unstopped processes (we're done
+                    # killing everything), it's OK to stop or reload
                     raise asyncore.ExitNow
 
             r, w, x = [], [], []

+ 0 - 4
src/supervisor/tests/base.py

@@ -788,7 +788,6 @@ class DummyProcessGroup:
         self.config = config
         self.transitioned = False
         self.all_stopped = False
-        self.delay_processes = []
         self.dispatchers = {}
         self.unstopped_processes = []
 
@@ -798,9 +797,6 @@ class DummyProcessGroup:
     def stop_all(self):
         self.all_stopped = True
 
-    def get_delay_processes(self):
-        return self.delay_processes
-
     def get_unstopped_processes(self):
         return self.unstopped_processes
 

+ 4 - 5
src/supervisor/tests/test_process.py

@@ -1103,18 +1103,17 @@ class ProcessGroupBaseTests(unittest.TestCase):
     def _makeOne(self, *args, **kw):
         return self._getTargetClass()(*args, **kw)
 
-    def test_get_delay_processes(self):
+    def test_get_unstopped_processes(self):
         options = DummyOptions()
         from supervisor.states import ProcessStates
         pconfig1 = DummyPConfig(options, 'process1', 'process1','/bin/process1')
         process1 = DummyProcess(pconfig1, state=ProcessStates.STOPPING)
-        process1.delay = 1
         gconfig = DummyPGroupConfig(options, pconfigs=[pconfig1])
         group = self._makeOne(gconfig)
         group.processes = { 'process1': process1 }
-        delayed = group.get_delay_processes()
-        self.assertEqual(delayed, [process1])
-        
+        unstopped = group.get_unstopped_processes()
+        self.assertEqual(unstopped, [process1])
+
     def test_stop_all(self):
         from supervisor.states import ProcessStates
         options = DummyOptions()

+ 2 - 2
src/supervisor/tests/test_supervisord.py

@@ -294,7 +294,7 @@ class SupervisordTests(unittest.TestCase):
         process = DummyProcess(pconfig)
         gconfig = DummyPGroupConfig(options, pconfigs=[pconfig])
         pgroup = DummyProcessGroup(gconfig)
-        pgroup.delay_processes = [process]
+        pgroup.unstopped_processes = [process]
         L = []
         def callback():
             L.append(1)
@@ -303,7 +303,7 @@ class SupervisordTests(unittest.TestCase):
         import asyncore
         supervisord.options.test = True
         supervisord.runforever()
-        self.assertNotEqual(supervisord.lastdelayreport, 0)
+        self.assertNotEqual(supervisord.lastshutdownreport, 0)
 
     def test_getSupervisorStateDescription(self):
         from supervisor.states import getSupervisorStateDescription