@@ -71,6 +71,19 @@ def eventuallyTrue(self, f, m, retries=60, interval=2):
                     raise
                 time.sleep(interval)
 
+    def eventuallyTrueFunc(self, f, xf, m, retries=60, interval=2):
+        while True:
+            try:
+                y = f()
+                x = xf(y)
+                self.assertTrue(x, m)
+                return True
+            except AssertionError:
+                retries = retries - 1
+                if not retries > 0:
+                    raise
+                time.sleep(interval)
+
     @classmethod
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def setUpClass(cls):
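A minimal, self-contained sketch of the retry semantics behind the new eventuallyTrueFunc helper and the observedGeneration check exercised later in this diff; the fake_pg dict is a stand-in assumption for the response of the real k8s.pg_get() call, not part of the change itself.

import time
import unittest

class EventuallyTrueFuncSketch(unittest.TestCase):
    # Same loop as in the diff: fetch with f(), apply predicate xf, and retry
    # on AssertionError until the retry budget is exhausted.
    def eventuallyTrueFunc(self, f, xf, m, retries=60, interval=2):
        while True:
            try:
                self.assertTrue(xf(f()), m)
                return True
            except AssertionError:
                retries = retries - 1
                if not retries > 0:
                    raise
                time.sleep(interval)

    def test_generation_matches_observed_generation(self):
        # Stand-in for k8s.pg_get(); a real run would fetch the postgresql custom resource.
        fake_pg = {"metadata": {"generation": 3}, "status": {"observedGeneration": 3}}
        self.eventuallyTrueFunc(
            lambda: fake_pg,
            lambda pg: pg.get("metadata", {}).get("generation", 0) == pg.get("status", {}).get("observedGeneration", -1),
            "Expected generation and status.observedGeneration to match",
            retries=3, interval=0)

if __name__ == "__main__":
    unittest.main()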
@@ -559,7 +572,7 @@ def compare_config():
 
         pg_patch_config["spec"]["patroni"]["slots"][slot_to_change]["database"] = "bar"
         del pg_patch_config["spec"]["patroni"]["slots"][slot_to_remove]
-
+
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_delete_slot_patch)
 
@@ -576,7 +589,7 @@ def compare_config():
 
         self.eventuallyEqual(lambda: self.query_database(leader.metadata.name, "postgres", get_slot_query % ("database", slot_to_change))[0], "bar",
             "The replication slot cannot be updated", 10, 5)
-
+
         # make sure slot from Patroni didn't get deleted
         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_slot_query % ("slot_name", patroni_slot))), 1,
             "The replication slot from Patroni gets deleted", 10, 5)
@@ -1670,6 +1683,13 @@ def test_overwrite_pooler_deployment(self):
         self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name=pooler_name), 2,
                              "Operator did not succeed in overwriting labels")
 
+        # status observedGeneration should match metadata.generation
+        self.eventuallyTrueFunc(
+            lambda: k8s.pg_get(),
+            lambda pg: pg.get("metadata", {}).get("generation", 0) == pg.get("status", {}).get("observedGeneration", -1),
+            "Expected generation and status.observedGeneration to match",
+        )
+
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresqls', 'acid-minimal-cluster',
@@ -1683,6 +1703,13 @@ def test_overwrite_pooler_deployment(self):
         self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=" + pooler_name),
                              0, "Pooler pods not scaled down")
 
+        # status observedGeneration should match metadata.generation
+        self.eventuallyTrueFunc(
+            lambda: k8s.pg_get(),
+            lambda pg: pg.get("metadata", {}).get("generation", 0) == pg.get("status", {}).get("observedGeneration", -1),
+            "Expected generation and status.observedGeneration to match",
+        )
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_owner_references(self):
         '''
@@ -2022,7 +2049,7 @@ def test_rolling_update_label_timeout(self):
 
         # pod_label_wait_timeout should have been exceeded hence the rolling update is continued on next sync
         # check if the cluster state is "SyncFailed"
-        self.eventuallyEqual(lambda: k8s.pg_get_status(), "SyncFailed", "Expected SYNC event to fail")
+        self.eventuallyEqual(lambda: k8s.pg_get_status(), {"PostgresClusterStatus": "SyncFailed"}, "Expected SYNC event to fail")
 
         # wait for next sync, replica should be running normally by now and be ready for switchover
         k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
@@ -2037,7 +2064,13 @@ def test_rolling_update_label_timeout(self):
 
         # status should again be "SyncFailed" but turn into "Running" on the next sync
         time.sleep(30)
-        self.eventuallyEqual(lambda: k8s.pg_get_status(), "Running", "Expected running cluster after two syncs")
+        self.eventuallyEqual(lambda: k8s.pg_get_status(), {"PostgresClusterStatus": "Running"}, "Expected running cluster after two syncs")
+        # status observedGeneration should match metadata.generation
+        self.eventuallyTrueFunc(
+            lambda: k8s.pg_get(),
+            lambda pg: pg.get("metadata", {}).get("generation", 0) == pg.get("status", {}).get("observedGeneration", -1),
+            "Expected generation and status.observedGeneration to match",
+        )
 
         # revert config changes
         patch_resync_config = {