Open · lalalapotter opened this issue 1 year ago

Users run the following code and get the issue:
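The snippet below is reconstructed from Cell In[10] in the traceback later in this thread; `train_data` is prepared earlier in the user's notebook and is not shown in the issue.

```python
from bigdl.chronos.autots.model.auto_prophet import AutoProphet

# train_data: a time-series DataFrame prepared earlier in the notebook
auto_prophet = AutoProphet()
auto_prophet.fit(data=train_data,
                 cross_validation=True,
                 freq="1D")
```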
As introduced in the documentation, we currently support Chronos on Python 3.7 ~ 3.9, but according to the logs your environment is Python 3.10. Could you run on Python 3.9 and see whether the issue still exists?
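For a quick check that the notebook kernel is actually on a supported interpreter, a minimal sketch (the version bounds follow the support statement above):

```python
# Sanity check: Chronos currently supports Python 3.7 ~ 3.9.
import sys

print(sys.version)
assert (3, 7) <= sys.version_info[:2] <= (3, 9), "Unsupported Python version for Chronos"
```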
Hi, thank you for the update. I have downgraded to Python 3.9 and am running on my PRC server now, and I am seeing the error below:
---------------------------------------------------------------------------
Exception                                 Traceback (most recent call last)
Cell In[10], line 2
      1 from bigdl.chronos.autots.model.auto_prophet import AutoProphet
----> 2 auto_prophet = AutoProphet()
      3 auto_prophet.fit(data=train_data,
      4                  cross_validation=True,
      5                  freq="1D")
      6 print("Training completed.")

File ~/.local/lib/python3.9/site-packages/bigdl/chronos/autots/model/auto_prophet.py:112, in AutoProphet.__init__(self, changepoint_prior_scale, seasonality_prior_scale, holidays_prior_scale, seasonality_mode, changepoint_range, metric, metric_mode, logs_dir, cpus_per_trial, name, remote_dir, load_dir, **prophet_config)
    110     self.metric_mode = metric_mode
    111     model_builder = ProphetBuilder()
--> 112     self.auto_est = AutoEstimator(model_builder=model_builder,
    113                                   logs_dir=logs_dir,
    114                                   resources_per_trial={"cpu": cpus_per_trial},
    115                                   remote_dir=remote_dir,
    116                                   name=name)
    117 except ImportError:
    118     warnings.warn("You need to install `bigdl-orca[automl]` to use `fit` function.")

File ~/.local/lib/python3.9/site-packages/bigdl/orca/automl/auto_estimator.py:53, in AutoEstimator.__init__(self, model_builder, logs_dir, resources_per_trial, remote_dir, name)
     46 def __init__(self,
     47              model_builder: "ModelBuilder",
     48              logs_dir: str="/tmp/auto_estimator_logs",
     49              resources_per_trial: Optional[Dict[str, int]]=None,
     50              remote_dir: Optional[str]=None,
     51              name: Optional[str]=None) -> None:
     52     self.model_builder = model_builder
---> 53     self.searcher = SearchEngineFactory.create_engine(
     54         backend="ray",
     55         logs_dir=logs_dir,
     56         resources_per_trial=resources_per_trial,
     57         remote_dir=remote_dir,
     58         name=name)
     59     self._fitted = False
     60     self.best_trial = None

File ~/.local/lib/python3.9/site-packages/bigdl/orca/automl/search/__init__.py:25, in SearchEngineFactory.create_engine(backend, *args, **kwargs)
     23 if backend == "ray":
     24     from bigdl.orca.automl.search.ray_tune import RayTuneSearchEngine
---> 25     return RayTuneSearchEngine(*args, **kwargs)

File ~/.local/lib/python3.9/site-packages/bigdl/orca/automl/search/ray_tune/ray_tune_search_engine.py:53, in RayTuneSearchEngine.__init__(self, logs_dir, resources_per_trial, name, remote_dir)
     51 self.trials = None
     52 self.name = name
---> 53 self.remote_dir = remote_dir or RayTuneSearchEngine.get_default_remote_dir(name)
     54 self.logs_dir = os.path.abspath(os.path.expanduser(logs_dir))

File ~/.local/lib/python3.9/site-packages/bigdl/orca/automl/search/ray_tune/ray_tune_search_engine.py:60, in RayTuneSearchEngine.get_default_remote_dir(name)
     58 from bigdl.orca.ray import OrcaRayContext
     59 from bigdl.orca.automl.search.utils import process
---> 60 ray_ctx = OrcaRayContext.get()
     61 if ray_ctx.is_local:
     62     return None

File ~/.local/lib/python3.9/site-packages/bigdl/orca/ray/raycontext.py:103, in OrcaRayContext.get(cls, initialize)
    101     ray_ctx = OrcaRayContext._active_ray_context
    102     if initialize and not ray_ctx.initialized:
--> 103         ray_ctx.init()
    104     return ray_ctx
    105 else:

File ~/.local/lib/python3.9/site-packages/bigdl/orca/ray/raycontext.py:77, in OrcaRayContext.init(self, driver_cores)
     75     results = ray.init(**self.ray_args)
     76 else:
---> 77     results = self._ray_on_spark_context.init(driver_cores=driver_cores)
     78 self.num_ray_nodes = self._ray_on_spark_context.num_ray_nodes  # type: ignore
     79 self.ray_node_cpu_cores = self._ray_on_spark_context.ray_node_cpu_cores  # type: ignore

File ~/.local/lib/python3.9/site-packages/bigdl/orca/ray/ray_on_spark_context.py:601, in RayOnSparkContext.init(self, driver_cores)
    599     init_params["_redis_password"] = self.redis_password
    600     init_params.update(kwargs)
--> 601     self._address_info = ray.init(**init_params)
    602 else:
    603     self.cluster_ips = self._gather_cluster_ips()

File ~/.local/lib/python3.9/site-packages/ray/_private/client_mode_hook.py:103, in client_mode_hook.<locals>.wrapper(*args, **kwargs)

File ~/.local/lib/python3.9/site-packages/ray/_private/worker.py:1514, in init(address, num_cpus, num_gpus, resources, labels, object_store_memory, local_mode, ignore_reinit_error, include_dashboard, dashboard_host, dashboard_port, job_config, configure_logging, logging_level, logging_format, log_to_driver, namespace, runtime_env, storage, **kwargs)
   1480 ray_params = ray._private.parameter.RayParams(
   1481     node_ip_address=node_ip_address,
   1482     raylet_ip_address=raylet_ip_address,
   (...)
   1508     node_name=_node_name,
   1509 )
   1510 # Start the Ray processes. We set shutdown_at_exit=False because we
   1511 # shutdown the node in the ray.shutdown call that happens in the atexit
   1512 # handler. We still spawn a reaper process in case the atexit handler
   1513 # isn't called.
-> 1514 _global_node = ray._private.node.Node(
   1515     head=True,
   1516     shutdown_at_exit=False,
   1517     spawn_reaper=True,
   1518     ray_params=ray_params,
   1519 )
   1520 else:
   1521     # In this case, we are connecting to an existing cluster.
   1522     if num_cpus is not None or num_gpus is not None:

File ~/.local/lib/python3.9/site-packages/ray/_private/node.py:287, in Node.__init__(self, ray_params, head, shutdown_at_exit, spawn_reaper, connect_only, default_worker)
    285 # Start processes.
    286 if head:
--> 287     self.start_head_processes()
    289 if not connect_only:
    290     self.start_ray_processes()

File ~/.local/lib/python3.9/site-packages/ray/_private/node.py:1181, in Node.start_head_processes(self)
   1178 include_dashboard = True
   1179 raise_on_api_server_failure = True
-> 1181 self.start_api_server(
   1182     include_dashboard=include_dashboard,
   1183     raise_on_failure=raise_on_api_server_failure,
   1184 )

File ~/.local/lib/python3.9/site-packages/ray/_private/node.py:931, in Node.start_api_server(self, include_dashboard, raise_on_failure)
    926 # Only redirect logs to .err. .err file is only useful when the
    927 # component has an unexpected output to stdout/stderr.
    928 _, stderr_file = self.get_log_file_handles(
    929     "dashboard", unique=True, create_out=False
    930 )
--> 931 self._webui_url, process_info = ray._private.services.start_api_server(
    932     include_dashboard,
    933     raise_on_failure,
    934     self._ray_params.dashboard_host,
    935     self.gcs_address,
    936     self._node_ip_address,
    937     self._temp_dir,
    938     self._logs_dir,
    939     self._session_dir,
    940     port=self._ray_params.dashboard_port,
    941     dashboard_grpc_port=self._ray_params.dashboard_grpc_port,
    942     fate_share=self.kernel_fate_share,
    943     max_bytes=self.max_bytes,
    944     backup_count=self.backup_count,
    945     redirect_logging=self.should_redirect_logs(),
    946     stdout_file=stderr_file,
    947     stderr_file=stderr_file,
    948 )
    949 assert ray_constants.PROCESS_TYPE_DASHBOARD not in self.all_processes
    950 if process_info is not None:

File ~/.local/lib/python3.9/site-packages/ray/_private/services.py:1274, in start_api_server(include_dashboard, raise_on_failure, host, gcs_address, node_ip_address, temp_dir, logdir, session_dir, port, dashboard_grpc_port, fate_share, max_bytes, backup_count, redirect_logging, stdout_file, stderr_file)
   1272 except Exception as e:
   1273     if raise_on_failure:
-> 1274         raise e from e
   1275     else:
   1276         logger.error(e)

File ~/.local/lib/python3.9/site-packages/ray/_private/services.py:1262, in start_api_server(include_dashboard, raise_on_failure, host, gcs_address, node_ip_address, temp_dir, logdir, session_dir, port, dashboard_grpc_port, fate_share, max_bytes, backup_count, redirect_logging, stdout_file, stderr_file)
   1255     raise Exception(
   1256         f"Failed to read dashboard.err file: {e}. "
   1257         "It is unexpected. Please report an issue to "
   1258         "Ray github. "
   1259         "https://github.com/ray-project/ray/issues"
   1260     )
   1261 last_log_str = "\n" + "\n".join(reversed(lines[-lines_to_read:]))
-> 1262 raise Exception(last_log_str)
   1263 else:
   1264     # Is it reachable?
   1265     raise Exception("Failed to start a dashboard.")

Exception: The last 20 lines of /tmp/ray/session_2023-07-26_22-39-59_815545_1359886/logs/dashboard.err (it contains the error message from the dashboard):
  import site = 1
  sys._base_executable = '/usr/bin/python3'
  sys.base_prefix = '/usr'
  sys.base_exec_prefix = '/usr'
  sys.platlibdir = 'lib'
  sys.executable = '/usr/bin/python3'
  sys.prefix = '/usr'
  sys.exec_prefix = '/usr'
  sys.path = [
    '/home/siawchen/Downloads/spark-3.2.2-bin-hadoop3.2/python/lib/pyspark.zip',
    '/home/siawchen/Downloads/spark-3.2.2-bin-hadoop3.2/python/lib/py4j-0.10.9.5-src.zip',
    '/usr/lib/python39.zip',
    '/usr/lib/python3.9',
    '/usr/lib/python3.9/lib-dynload',
  ]
Fatal Python error: init_fs_encoding: failed to get the Python codec of the filesystem encoding
Python runtime state: core initialized
ModuleNotFoundError: No module named 'encodings'

Current thread 0x00007f424a5a3740 (most recent call first):
Hi @Madhustat,
The error, "ModuleNotFoundError: No module named 'encodings'", occurs for multiple reasons, mainly because of python environment, please check:
After checking the above items, if the issue still exists, please let us know and we will help you solve it ASAP. If the interpreter paths do look wrong, the sketch below shows one common way to pin Spark to the right environment.
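A hedged workaround sketch (assumption: the dashboard subprocess inherits its interpreter from Spark's PYSPARK_PYTHON setting, so it must be set before any bigdl.orca/Chronos initialization):

```python
# Workaround sketch: pin Spark (and hence Ray-on-Spark daemons) to the
# current Python 3.9 interpreter before initializing bigdl.orca.
import os
import sys

os.environ["PYSPARK_PYTHON"] = sys.executable
os.environ["PYSPARK_DRIVER_PYTHON"] = sys.executable
# A stray PYTHONHOME is a classic cause of "No module named 'encodings'".
os.environ.pop("PYTHONHOME", None)
```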