vitortoledo93 / SEBC

Cloudera Bootcamp repository

Security Lab #4

Closed: vitortoledo93 closed this issue 7 years ago

vitortoledo93 commented 7 years ago

/etc/cloudera-scm-agent/config.ini:

```
[General]
# Hostname of the CM server.
server_host=keyrussrprd01.datacentrics.com.br

# Port that the CM server is listening on.
server_port=7182

## It should not normally be necessary to modify these.
# Port that the CM agent should listen on.
# listening_port=9000

# IP Address that the CM agent should listen on.
# listening_ip=

# Hostname that the CM agent reports as its hostname. If unset, will be
# obtained in code through something like this:
#
#   python -c 'import socket; \
#              print socket.getfqdn(), \
#                    socket.gethostbyname(socket.getfqdn())'
#
# listening_hostname=

# An alternate hostname to report as the hostname for this host in CM.
# Useful when this agent is behind a load balancer or proxy and all
# inbound communication must connect through that proxy.
# reported_hostname=

# Port that supervisord should listen on.
# NB: This only takes effect if supervisord is restarted.
# supervisord_port=19001

# Log file.  The supervisord log file will be placed into
# the same directory.  Note that if the agent is being started via the
# init.d script, /var/log/cloudera-scm-agent/cloudera-scm-agent.out will
# also have a small amount of output (from before logging is initialized).
# log_file=/var/log/cloudera-scm-agent/cloudera-scm-agent.log

# Persistent state directory.  Directory to store CM agent state that
# persists across instances of the agent process and system reboots.
# Particularly, the agent's UUID is stored here.
# lib_dir=/var/lib/cloudera-scm-agent

# Parcel directory.  Unpacked parcels will be stored in this directory.
# Downloaded parcels will be stored in <parcel_dir>/../parcel-cache
# parcel_dir=/opt/cloudera/parcels

# Enable supervisord event monitoring.  Used in eager heartbeating, amongst
# other things.
# enable_supervisord_events=true

# Maximum time to wait (in seconds) for all metric collectors to finish
# collecting data.
max_collection_wait_seconds=10.0

# Maximum time to wait (in seconds) when connecting to a local role's
# webserver to fetch metrics.
metrics_url_timeout_seconds=30.0

# Maximum time to wait (in seconds) when connecting to a local TaskTracker
# to fetch task attempt data.
task_metrics_timeout_seconds=5.0

# The list of non-device (nodev) filesystem types which will be monitored.
monitored_nodev_filesystem_types=nfs,nfs4,tmpfs

# The list of filesystem types which are considered local for monitoring purposes.
# These filesystems are combined with the other local filesystem types found in
# /proc/filesystems
local_filesystem_whitelist=ext2,ext3,ext4,xfs

# The largest size impala profile log bundle that this agent will serve to the
# CM server. If the CM server requests more than this amount, the bundle will
# be limited to this size. All instances of this limit being hit are logged to
# the agent log.
impala_profile_bundle_max_bytes=1073741824

# The largest size stacks log bundle that this agent will serve to the CM
# server. If the CM server requests more than this amount, the bundle will be
# limited to this size. All instances of this limit being hit are logged to the
# agent log.
stacks_log_bundle_max_bytes=1073741824

# The size to which the uncompressed portion of a stacks log can grow before it
# is rotated. The log will then be compressed during rotation.
stacks_log_max_uncompressed_file_size_bytes=5242880

# The orphan process directory staleness threshold. If a directory is more
# than this many seconds stale, the CM agent will remove it.
orphan_process_dir_staleness_threshold=5184000

# The orphan process directory refresh interval. The CM agent checks the
# staleness of the orphan process config directories at this interval, in
# seconds.
orphan_process_dir_refresh_interval=3600

# A knob to control the agent logging level. The options are listed as follows:
# 1) DEBUG (set the agent logging level to 'logging.DEBUG')
# 2) INFO (set the agent logging level to 'logging.INFO')
scm_debug=INFO

# The DNS resolution collection interval in seconds. A Java-based test program
# will be executed at most this often to collect Java DNS resolution
# metrics. The test program is only executed if the associated health test,
# Host DNS Resolution, is enabled.
dns_resolution_collection_interval_seconds=60

# The maximum time to wait (in seconds) for the java test program to collect
# java DNS resolution metrics.
dns_resolution_collection_timeout_seconds=30

# The directory location in which the agent-wide kerberos credential cache
# will be created.
# agent_wide_credential_cache_location=/var/run/cloudera-scm-agent

[Security]
# Use TLS and certificate validation when connecting to the CM server.
use_tls=1

# The maximum allowed depth of the certificate chain returned by the peer.
# The default value of 9 matches the default specified in openssl's
# SSL_CTX_set_verify.
max_cert_depth=9

# A file of CA certificates in PEM format. The file can contain several CA
# certificates identified by
#
# -----BEGIN CERTIFICATE-----
# ... (CA certificate in base64 encoding) ...
# -----END CERTIFICATE-----
#
# sequences. Text is allowed before, between, and after the certificates and
# can be used, e.g., for descriptions of the certificates.
#
# The file is loaded once, the first time an HTTPS connection is attempted. A
# restart of the agent is required to pick up changes to the file.
#
# Note that if neither verify_cert_file nor verify_cert_dir is set, certificate
# verification will not be performed.
# verify_cert_file=

# Directory containing CA certificates in PEM format. The files each contain one
# CA certificate. The files are looked up by the CA subject name hash value,
# which must hence be available. If more than one CA certificate with the same
# name hash value exists, the extension must be different (e.g. 9d66eef0.0,
# 9d66eef0.1 etc). The search is performed in the ordering of the extension
# number, regardless of other properties of the certificates. Use the c_rehash
# utility to create the necessary links.
#
# The certificates in the directory are only looked up when required, e.g. when
# building the certificate chain or when actually performing the verification
# of a peer certificate. The contents of the directory can thus be changed
# without an agent restart.
#
# When looking up CA certificates, the verify_cert_file is first searched, then
# those in the directory. Certificate matching is done based on the subject name,
# the key identifier (if present), and the serial number as taken from the
# certificate to be verified. If these data do not match, the next certificate
# will be tried. If a first certificate matching the parameters is found, the
# verification process will be performed; no other certificates for the same
# parameters will be searched in case of failure.
#
# Note that if neither verify_cert_file nor verify_cert_dir is set, certificate
# verification will not be performed.
# verify_cert_dir=

# PEM file containing client private key.
# client_key_file=

# A command to run which returns the client private key password on stdout
# client_keypw_cmd=

# If client_keypw_cmd isn't specified, instead a text file containing
# the client private key password can be used.
# client_keypw_file=

# PEM file containing client certificate.
# client_cert_file=

## Location of Hadoop files.  These are the CDH locations when installed by
## packages.  Unused when CDH is installed by parcels.
[Hadoop]
#cdh_crunch_home=/usr/lib/crunch
#cdh_flume_home=/usr/lib/flume-ng
#cdh_hadoop_bin=/usr/bin/hadoop
#cdh_hadoop_home=/usr/lib/hadoop
#cdh_hbase_home=/usr/lib/hbase
#cdh_hbase_indexer_home=/usr/lib/hbase-solr
#cdh_hcat_home=/usr/lib/hive-hcatalog
#cdh_hdfs_home=/usr/lib/hadoop-hdfs
#cdh_hive_home=/usr/lib/hive
#cdh_httpfs_home=/usr/lib/hadoop-httpfs
#cdh_hue_home=/usr/share/hue
#cdh_hue_plugins_home=/usr/lib/hadoop
#cdh_impala_home=/usr/lib/impala
#cdh_llama_home=/usr/lib/llama
#cdh_mr1_home=/usr/lib/hadoop-0.20-mapreduce
#cdh_mr2_home=/usr/lib/hadoop-mapreduce
#cdh_oozie_home=/usr/lib/oozie
#cdh_parquet_home=/usr/lib/parquet
#cdh_pig_home=/usr/lib/pig
#cdh_solr_home=/usr/lib/solr
#cdh_spark_home=/usr/lib/spark
#cdh_sqoop_home=/usr/lib/sqoop
#cdh_sqoop2_home=/usr/lib/sqoop2
#cdh_yarn_home=/usr/lib/hadoop-yarn
#cdh_zookeeper_home=/usr/lib/zookeeper
#hive_default_xml=/etc/hive/conf.dist/hive-default.xml
#webhcat_default_xml=/etc/hive-webhcat/conf.dist/webhcat-default.xml
#jsvc_home=/usr/libexec/bigtop-utils
#tomcat_home=/usr/lib/bigtop-tomcat
#oracle_home=/usr/share/oracle/instantclient

## Location of Cloudera Management Services files.
[Cloudera]
#mgmt_home=/usr/share/cmf

## Location of JDBC Drivers.
[JDBC]
#cloudera_mysql_connector_jar=/usr/share/java/mysql-connector-java.jar
#cloudera_oracle_connector_jar=/usr/share/java/oracle-connector-java.jar
#By default, postgres jar is found dynamically in $MGMT_HOME/lib
#cloudera_postgresql_jdbc_jar=
```
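With `use_tls=1` set under `[Security]`, the agent has to be restarted to pick up the change. A quick sanity check (a sketch; the hostname and port are taken from the config above) is to restart the agent, inspect the TLS handshake on the CM server's agent port, and watch the agent log for heartbeats:

```
# Restart the agent so it re-reads config.ini
service cloudera-scm-agent restart

# Confirm the CM server presents a certificate on the agent port (7182)
openssl s_client -connect keyrussrprd01.datacentrics.com.br:7182 </dev/null

# Follow the agent log; heartbeat errors here usually mean a TLS mismatch
tail -f /var/log/cloudera-scm-agent/cloudera-scm-agent.log
```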
vitortoledo93 commented 7 years ago

Screen capture of CM showing TLS level 1 is enabled:

[screenshot]
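The same setting can also be read back from the CM REST API instead of the UI (a sketch; the API version, port, and admin credentials are assumptions for this cluster):

```
# CM 5.10 serves API v14; grep is only a rough filter for the TLS properties
curl -s -u admin:admin \
  'http://keyrussrprd01.datacentrics.com.br:7180/api/v14/cm/config?view=full' \
  | grep -i tls
```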

vitortoledo93 commented 7 years ago

/etc/krb5.conf:

```
[logging]
 default = FILE:/var/log/krb5libs.log
 kdc = FILE:/var/log/krb5kdc.log
 admin_server = FILE:/var/log/kadmind.log

[libdefaults]
 default_realm = DATACENTRICS.COM
 dns_lookup_realm = false
 dns_lookup_kdc = false
 ticket_lifetime = 24h
 renew_lifetime = 7d
 forwardable = true
 udp_preference_limit = 1
 default_tgs_enctypes = arcfour-hmac
 default_tkt_enctypes = arcfour-hmac

[realms]
 DATACENTRICS.COM  = {
  kdc = keyrussrprd01.datacentrics.com.br
  admin_server = keyrussrprd01.datacentrics.com.br
 }

[domain_realm]
   .datacentrics.com = DATACENTRICS.COM
   datacentrics.com = DATACENTRICS.COM
```
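Since `default_tgs_enctypes` and `default_tkt_enctypes` are pinned to `arcfour-hmac`, it is worth confirming that issued tickets really carry that enctype (a sketch using `george`, one of the lab principals shown further down):

```
kinit george
# -e prints the encryption type of each cached ticket
klist -e
```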
vitortoledo93 commented 7 years ago

/var/kerberos/krb5kdc/kdc.conf:

```
[kdcdefaults]
 kdc_ports = 88
 kdc_tcp_ports = 88

[realms]
  DATACENTRICS.COM = {
  #master_key_type = aes256-cts
  acl_file = /var/kerberos/krb5kdc/kadm5.acl
  dict_file = /usr/share/dict/words
  admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
  supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
  max_life = 1d
  max_renewable_life = 7d
 }
```
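For reference, bootstrapping a KDC against this kdc.conf follows the standard MIT Kerberos steps (a sketch of the usual RHEL/CentOS 6 procedure, not taken verbatim from the lab):

```
# Create the KDC database for the realm (prompts for a master password;
# -s stores a stash file so the KDC can start unattended)
kdb5_util create -s -r DATACENTRICS.COM

# Start the KDC and the admin server and enable them at boot
service krb5kdc start
service kadmin start
chkconfig krb5kdc on
chkconfig kadmin on
```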
vitortoledo93 commented 7 years ago

/var/kerberos/krb5kdc/kadm5.acl:

```
*/admin@DATACENTRICS.COM *
```
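This one-line ACL gives every `*/admin` principal full rights on the realm. With it in place, the admin and lab principals can be created locally (a sketch; `root/admin` is an assumed admin principal name, while `george` and `ferdinand` appear in the sessions below):

```
# Each addprinc prompts for the new principal's password
kadmin.local -q "addprinc root/admin"
kadmin.local -q "addprinc george"
kadmin.local -q "addprinc ferdinand"
```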

vitortoledo93 commented 7 years ago

kinit and klist output:

[screenshot]

vitortoledo93 commented 7 years ago

Sentry, first step: showing tables:

```
scan complete in 2ms
Connecting to jdbc:hive2://keyrussrprd01.datacentrics.com.br:10000/default;principal=hive/keyrussrprd01.datacentrics.com.br@DATACENTRICS.COM
Connected to: Apache Hive (version 1.1.0-cdh5.10.1)
Driver: Hive JDBC (version 1.1.0-cdh5.10.1)
Transaction isolation: TRANSACTION_REPEATABLE_READ
Beeline version 1.1.0-cdh5.10.1 by Apache Hive
0: jdbc:hive2://keyrussrprd01.datacentrics.co> show tables;
INFO  : Compiling command(queryId=hive_20170405202222_8dfaf025-2705-4344-a350-a5aad38eb40c): show tables
INFO  : Semantic Analysis Completed
INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:tab_name, type:string, comment:from deserializer)], properties:null)
INFO  : Completed compiling command(queryId=hive_20170405202222_8dfaf025-2705-4344-a350-a5aad38eb40c); Time taken: 0.059 seconds
INFO  : Executing command(queryId=hive_20170405202222_8dfaf025-2705-4344-a350-a5aad38eb40c): show tables
INFO  : Starting task [Stage-0:DDL] in serial mode
INFO  : Completed executing command(queryId=hive_20170405202222_8dfaf025-2705-4344-a350-a5aad38eb40c); Time taken: 0.117 seconds
INFO  : OK
+-----------+--+
| tab_name  |
+-----------+--+
+-----------+--+
No rows selected (0.269 seconds)
0: jdbc:hive2://keyrussrprd01.datacentrics.co>
```
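An empty table list is the expected Sentry default: a user whose groups hold no role sees nothing. The differing results for george and ferdinand below imply a database-level grant for one and a table-level grant for the other; the grants would have been issued by a Sentry admin along these lines (a sketch; the role and group names are assumptions, and only the table name `sample_07` comes from the output below):

```
# Run as a member of the Sentry admin group (e.g. hive)
cat > /tmp/sentry_grants.sql <<'EOF'
CREATE ROLE analyst;
GRANT ALL ON DATABASE default TO ROLE analyst;
GRANT ROLE analyst TO GROUP george;
CREATE ROLE reader;
GRANT SELECT ON TABLE sample_07 TO ROLE reader;
GRANT ROLE reader TO GROUP ferdinand;
EOF
beeline -u "jdbc:hive2://keyrussrprd01.datacentrics.com.br:10000/default;principal=hive/keyrussrprd01.datacentrics.com.br@DATACENTRICS.COM" \
  -f /tmp/sentry_grants.sql
```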
vitortoledo93 commented 7 years ago

Showing tables as george and ferdinand:

```
[root@keyrussrprd01 ~]# kinit george
Password for george@DATACENTRICS.COM:
[root@keyrussrprd01 ~]# beeline -u "jdbc:hive2://keyrussrprd01.datacentrics.com.br:10000/default;principal=hive/keyrussrprd01.datacentrics.com.br@DATACENTRICS.COM"
scan complete in 2ms
Connecting to jdbc:hive2://keyrussrprd01.datacentrics.com.br:10000/default;principal=hive/keyrussrprd01.datacentrics.com.br@DATACENTRICS.COM
Connected to: Apache Hive (version 1.1.0-cdh5.10.1)
Driver: Hive JDBC (version 1.1.0-cdh5.10.1)
Transaction isolation: TRANSACTION_REPEATABLE_READ
Beeline version 1.1.0-cdh5.10.1 by Apache Hive
0: jdbc:hive2://keyrussrprd01.datacentrics.co> show tables;
INFO  : Compiling command(queryId=hive_20170405203333_067bdf79-cf44-4d7c-85c2-355a7a1932ab): show tables
INFO  : Semantic Analysis Completed
INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:tab_name, type:string, comment:from deserializer)], properties:null)
INFO  : Completed compiling command(queryId=hive_20170405203333_067bdf79-cf44-4d7c-85c2-355a7a1932ab); Time taken: 0.066 seconds
INFO  : Executing command(queryId=hive_20170405203333_067bdf79-cf44-4d7c-85c2-355a7a1932ab): show tables
INFO  : Starting task [Stage-0:DDL] in serial mode
INFO  : Completed executing command(queryId=hive_20170405203333_067bdf79-cf44-4d7c-85c2-355a7a1932ab); Time taken: 0.135 seconds
INFO  : OK
+------------+--+
|  tab_name  |
+------------+--+
| customers  |
| sample_07  |
| sample_08  |
| web_logs   |
+------------+--+
4 rows selected (0.3 seconds)
0: jdbc:hive2://keyrussrprd01.datacentrics.co> !q
Closing: 0: jdbc:hive2://keyrussrprd01.datacentrics.com.br:10000/default;principal=hive/keyrussrprd01.datacentrics.com.br@DATACENTRICS.COM
[root@keyrussrprd01 ~]# kdestroy
[root@keyrussrprd01 ~]# kinit ferdinand
Password for ferdinand@DATACENTRICS.COM:
[root@keyrussrprd01 ~]# beeline -u "jdbc:hive2://keyrussrprd01.datacentrics.com.br:10000/default;principal=hive/keyrussrprd01.datacentrics.com.br@DATACENTRICS.COM"
scan complete in 2ms
Connecting to jdbc:hive2://keyrussrprd01.datacentrics.com.br:10000/default;principal=hive/keyrussrprd01.datacentrics.com.br@DATACENTRICS.COM
Connected to: Apache Hive (version 1.1.0-cdh5.10.1)
Driver: Hive JDBC (version 1.1.0-cdh5.10.1)
Transaction isolation: TRANSACTION_REPEATABLE_READ
Beeline version 1.1.0-cdh5.10.1 by Apache Hive
0: jdbc:hive2://keyrussrprd01.datacentrics.co> show tables;
INFO  : Compiling command(queryId=hive_20170405203333_66714887-6a5a-4a03-b110-2b47efb86e64): show tables
INFO  : Semantic Analysis Completed
INFO  : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:tab_name, type:string, comment:from deserializer)], properties:null)
INFO  : Completed compiling command(queryId=hive_20170405203333_66714887-6a5a-4a03-b110-2b47efb86e64); Time taken: 0.067 seconds
INFO  : Executing command(queryId=hive_20170405203333_66714887-6a5a-4a03-b110-2b47efb86e64): show tables
INFO  : Starting task [Stage-0:DDL] in serial mode
INFO  : Completed executing command(queryId=hive_20170405203333_66714887-6a5a-4a03-b110-2b47efb86e64); Time taken: 0.137 seconds
INFO  : OK
+------------+--+
|  tab_name  |
+------------+--+
| sample_07  |
+------------+--+
1 row selected (0.298 seconds)
0: jdbc:hive2://keyrussrprd01.datacentrics.co>
```
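george sees all four tables while ferdinand sees only `sample_07`, which matches a database-wide grant versus a single-table grant. Which roles a session actually resolved can be confirmed from the same connection (a sketch):

```
# From a kinit'ed session, list the roles Sentry resolved for the current user
beeline -u "jdbc:hive2://keyrussrprd01.datacentrics.com.br:10000/default;principal=hive/keyrussrprd01.datacentrics.com.br@DATACENTRICS.COM" \
  -e "SHOW CURRENT ROLES;"
```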