Three tests fail with the same error: the `odf` CLI reports `Error: Operator namespace 'openshift-storage' does not exist`, because ODF on this cluster is deployed in the `odf-storage` namespace:

- tests/functional/odf-cli/test_get_commands.py::TestGetCommands::test_get_health
- tests/functional/odf-cli/test_get_commands.py::TestGetCommands::test_get_mon_endpoint
- tests/functional/odf-cli/test_operator_restart.py::TestOperatorRestart::test_operator_restart

Full log of the first failure (Jenkins timestamps and ANSI color codes stripped):
```
tests/functional/odf-cli/test_get_commands.py::TestGetCommands::test_get_health
-------------------------------- live log setup --------------------------------
21:14:07 - MainThread - ocs_ci.utility.utils - INFO - testrun_name: dosypenk-OCS4-17-Downstream-OCP4-17-ROSA_HCP-MANAGED_CP-1AZ-RHCOS-0M-3W-tier1
21:14:07 - MainThread - tests.conftest - INFO - Checking for Ceph Health OK
21:14:07 - MainThread - ocs_ci.utility.utils - INFO - Executing command: oc --kubeconfig /home/jenkins/current-cluster-dir/openshift-cluster-dir/auth/kubeconfig -n odf-storage get Pod -n odf-storage --selector=app=rook-ceph-tools -o yaml
21:14:08 - MainThread - ocs_ci.utility.utils - INFO - Executing command: oc --kubeconfig /home/jenkins/current-cluster-dir/openshift-cluster-dir/auth/kubeconfig -n odf-storage get Pod -n odf-storage --selector=app=rook-ceph-tools -o yaml
21:14:08 - MainThread - ocs_ci.ocs.resources.pod - INFO - These are the ceph tool box pods: ['rook-ceph-tools-58c64c465f-jsfbw']
21:14:08 - MainThread - ocs_ci.utility.utils - INFO - Executing command: oc --kubeconfig /home/jenkins/current-cluster-dir/openshift-cluster-dir/auth/kubeconfig -n odf-storage get Pod rook-ceph-tools-58c64c465f-jsfbw -n odf-storage
21:14:09 - MainThread - ocs_ci.utility.utils - INFO - Executing command: oc --kubeconfig /home/jenkins/current-cluster-dir/openshift-cluster-dir/auth/kubeconfig -n odf-storage get Pod -n odf-storage -o yaml
21:14:12 - MainThread - ocs_ci.ocs.resources.pod - INFO - Pod name: rook-ceph-tools-58c64c465f-jsfbw
21:14:12 - MainThread - ocs_ci.ocs.resources.pod - INFO - Pod status: Running
21:14:12 - MainThread - ocs_ci.utility.utils - INFO - Executing command: oc -n odf-storage rsh rook-ceph-tools-58c64c465f-jsfbw ceph health
21:14:14 - MainThread - ocs_ci.utility.utils - INFO - Ceph cluster health is HEALTH_OK.
21:14:14 - MainThread - tests.conftest - INFO - Ceph health check passed at setup
21:14:14 - MainThread - ocs_ci.utility.utils - INFO - Executing command: ['oc', 'login', '-u', 'cluster-admin', '-p', '*****']
21:14:17 - MainThread - ocs_ci.utility.utils - INFO - Executing command: oc --kubeconfig /home/jenkins/current-cluster-dir/openshift-cluster-dir/auth/kubeconfig -n openshift-monitoring whoami --show-token
21:14:18 - MainThread - ocs_ci.utility.utils - INFO - Executing command: oc --kubeconfig /home/jenkins/current-cluster-dir/openshift-cluster-dir/auth/kubeconfig -n openshift-monitoring get Route prometheus-k8s -n openshift-monitoring -o yaml
21:14:19 - MainThread - tests.functional.conftest - INFO - ODF CLI binary downloaded and ODFCliRunner initialized successfully
21:14:19 - MainThread - ocs_ci.framework.pytest_customization.reports - INFO - duration reported by tests/functional/odf-cli/test_get_commands.py::TestGetCommands::test_get_health immediately after test execution: 11.15
-------------------------------- live log call ---------------------------------
21:14:19 - MainThread - ocs_ci.utility.utils - INFO - Executing command: odf get health
21:14:19 - MainThread - ocs_ci.utility.utils - WARNING - Command stderr: Error: Operator namespace 'openshift-storage' does not exist. namespaces "openshift-storage" not found
21:14:19 - MainThread - ocs_ci.framework.pytest_customization.reports - INFO - duration reported by tests/functional/odf-cli/test_get_commands.py::TestGetCommands::test_get_health immediately after test execution: 0.39
FAILED
_______________________ TestGetCommands.test_get_health ________________________

self = <test_get_commands.TestGetCommands object at 0x7f625269bee0>

    @polarion_id("OCS-6237")
    def test_get_health(self):
>       output = self.odf_cli_runner.run_get_health()

tests/functional/odf-cli/test_get_commands.py:21:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
ocs_ci/helpers/odf_cli.py:133: in run_get_health
    return self.run_command(" get health")
ocs_ci/helpers/odf_cli.py:124: in run_command
    output = exec_cmd(full_command)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

cmd = ['odf', 'get', 'health'], secrets = None, timeout = 600
ignore_error = False, threading_lock = None, silent = False, use_shell = False
cluster_config = None, lock_timeout = 7200, kwargs = {}
masked_cmd = 'odf get health'

    def exec_cmd(
        cmd,
        secrets=None,
        timeout=600,
        ignore_error=False,
        threading_lock=None,
        silent=False,
        use_shell=False,
        cluster_config=None,
        lock_timeout=7200,
        **kwargs,
    ):
        """
        Run an arbitrary command locally

        If the command is grep and matching pattern is not found, then this function
        returns "command terminated with exit code 1" in stderr.

        Args:
            cmd (str): command to run
            secrets (list): A list of secrets to be masked with asterisks
                This kwarg is popped in order to not interfere with
                subprocess.run(``**kwargs``)
            timeout (int): Timeout for the command, defaults to 600 seconds.
            ignore_error (bool): True if ignore non zero return code and do not
                raise the exception.
            threading_lock (threading.RLock): threading.RLock object that is used
                for handling concurrent oc commands
            silent (bool): If True will silent errors from the server, default false
            use_shell (bool): If True will pass the cmd without splitting
            cluster_config (MultiClusterConfig): In case of multicluster environment this object
                will be non-null
            lock_timeout (int): maximum timeout to wait for lock to prevent deadlocks (default 2 hours)

        Raises:
            CommandFailed: In case the command execution fails

        Returns:
            (CompletedProcess) A CompletedProcess object of the command that was executed
            CompletedProcess attributes:
            args: The list or str args passed to run().
            returncode (str): The exit code of the process, negative for signals.
            stdout (str): The standard output (None if not captured).
            stderr (str): The standard error (None if not captured).

        """
        masked_cmd = mask_secrets(cmd, secrets)
        log.info(f"Executing command: {masked_cmd}")
        if isinstance(cmd, str) and not kwargs.get("shell"):
            cmd = shlex.split(cmd)
        if config.RUN.get("custom_kubeconfig_location") and cmd[0] == "oc":
            if "--kubeconfig" in cmd:
                cmd.pop(2)
                cmd.pop(1)
            cmd = list_insert_at_position(cmd, 1, ["--kubeconfig"])
            cmd = list_insert_at_position(
                cmd, 2, [config.RUN["custom_kubeconfig_location"]]
            )
        if cluster_config and cmd[0] == "oc" and "--kubeconfig" not in cmd:
            kubepath = cluster_config.RUN["kubeconfig"]
            kube_index = 1
            # check if we have an oc plugin in the command
            plugin_list = "oc plugin list"
            cp = subprocess.run(
                shlex.split(plugin_list),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            subcmd = cmd[1].split("-")
            if len(subcmd) > 1:
                subcmd = "_".join(subcmd)
            if not isinstance(subcmd, str) and isinstance(subcmd, list):
                subcmd = str(subcmd[0])

            for l in cp.stdout.decode().splitlines():
                if subcmd in l:
                    # If oc cmdline has plugin name then we need to push the
                    # --kubeconfig to next index
                    kube_index = 2
                    log.info(f"Found oc plugin {subcmd}")
            cmd = list_insert_at_position(cmd, kube_index, ["--kubeconfig"])
            cmd = list_insert_at_position(cmd, kube_index + 1, [kubepath])
        try:
            if threading_lock and cmd[0] == "oc":
                threading_lock.acquire(timeout=lock_timeout)
            completed_process = subprocess.run(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
                timeout=timeout,
                **kwargs,
            )
        finally:
            if threading_lock and cmd[0] == "oc":
                threading_lock.release()
        masked_stdout = mask_secrets(completed_process.stdout.decode(), secrets)
        if len(completed_process.stdout) > 0:
            log.debug(f"Command stdout: {masked_stdout}")
        else:
            log.debug("Command stdout is empty")

        masked_stderr = mask_secrets(completed_process.stderr.decode(), secrets)
        if len(completed_process.stderr) > 0:
            if not silent:
                log.warning(f"Command stderr: {masked_stderr}")
        else:
            log.debug("Command stderr is empty")
        log.debug(f"Command return code: {completed_process.returncode}")
        if completed_process.returncode and not ignore_error:
            masked_stderr = bin_xml_escape(filter_out_emojis(masked_stderr))
            if (
                "grep" in masked_cmd
                and b"command terminated with exit code 1" in completed_process.stderr
            ):
                log.info(f"No results found for grep command: {masked_cmd}")
            else:
>               raise CommandFailed(
                    f"Error during execution of command: {masked_cmd}."
                    f"\nError is {masked_stderr}"
                )
E               ocs_ci.ocs.exceptions.CommandFailed: Error during execution of command: odf get health.
E               Error is Error: Operator namespace 'openshift-storage' does not exist. namespaces "openshift-storage" not found

ocs_ci/utility/utils.py:710: CommandFailed
```
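All three failures share the same root cause: the `odf` binary is executed with no namespace argument, so it falls back to its default operator namespace `openshift-storage`, while this ROSA HCP cluster deploys ODF in `odf-storage` (every `oc` command in the setup log above targets `-n odf-storage`). Note also that `exec_cmd()` only injects `--kubeconfig` for commands whose first token is `oc`, so the `odf` invocation gets no cluster context from the framework either.

Below is a minimal sketch of one possible fix in `ocs_ci/helpers/odf_cli.py`, assuming the CLI accepts `--operator-namespace`/`-n` flags like the upstream `kubectl-rook-ceph` plugin it is based on (not verified against the shipped odf-cli flag set), and that `config.ENV_DATA["cluster_namespace"]` holds the deployed namespace:

```python
# Sketch only -- assumptions are marked below; not a verified implementation.
from ocs_ci.framework import config
from ocs_ci.utility.utils import exec_cmd


class ODFCliRunner:
    binary_name = "odf"

    def run_command(self, args):
        """Run an odf CLI subcommand pinned to the deployed ODF namespace.

        ``args`` keeps the existing convention of a leading space,
        e.g. run_command(" get health").
        """
        # e.g. "odf-storage" on this cluster instead of "openshift-storage"
        namespace = config.ENV_DATA["cluster_namespace"]
        # ASSUMPTION: odf-cli honors --operator-namespace / -n the way
        # kubectl-rook-ceph does; if not, the namespace has to be injected
        # some other way (e.g. by switching the active context first).
        full_command = (
            f"{self.binary_name} "
            f"--operator-namespace {namespace} -n {namespace}{args}"
        )
        return exec_cmd(full_command)

    def run_get_health(self):
        return self.run_command(" get health")
```

With that change, `run_get_health()` would execute `odf --operator-namespace odf-storage -n odf-storage get health`, matching the namespace every `oc` call in the fixture setup already uses.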