Archive for the ‘Linux’ Category

Four years ago I wrote a post on how to use Squid in an Active Directory environment; in this one we'll use the SSSD service to log in to a CentOS machine with Active Directory credentials.

The System Security Services Daemon (SSSD) provides access to remote identity and authentication providers.

Prerequisites:

DNS resolution:

Make sure the domain name can be resolved:

cat /etc/resolv.conf

search test.com
nameserver 172.17.174.90
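To double-check resolution you can query the AD DNS server for the domain controller and its LDAP SRV record (a quick sanity check; dig is provided by the bind-utils package):

yum install bind-utils -y
# the SRV record should point to the domain controller(s)
dig +short SRV _ldap._tcp.test.com
# the domain controller itself should resolve too
dig +short test.com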

Install required packages:

yum install sssd realmd oddjob oddjob-mkhomedir adcli samba-common samba-common-tools krb5-workstation openldap-clients policycoreutils-python -y

Edit /etc/krb5.conf

# Configuration snippets may be placed in this directory as well
includedir /etc/krb5.conf.d/

includedir /var/lib/sss/pubconf/krb5.include.d/
[logging]
 default = FILE:/var/log/krb5libs.log
 kdc = FILE:/var/log/krb5kdc.log
 admin_server = FILE:/var/log/kadmind.log

[libdefaults]
 dns_lookup_realm = false
 ticket_lifetime = 24h
 renew_lifetime = 7d
 forwardable = true
 rdns = false
 pkinit_anchors = /etc/pki/tls/certs/ca-bundle.crt

 default_ccache_name = KEYRING:persistent:%{uid}

 default_realm = TEST.COM
[realms]
 TEST.COM = {
 }

[domain_realm]

 test.com = TEST.COM
 .test.com = TEST.COM
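With krb5.conf in place you can verify Kerberos against the domain before joining (a quick test; Administrator is just an example account):

kinit Administrator@TEST.COM
# a TGT for TEST.COM should be listed
klist
kdestroy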

Edit /etc/nsswitch.conf

#
# /etc/nsswitch.conf
#
# An example Name Service Switch config file. This file should be
# sorted with the most-used services at the beginning.
#
# The entry '[NOTFOUND=return]' means that the search for an
# entry should stop if the search in the previous entry turned
# up nothing. Note that if the search failed due to some other reason
# (like no NIS server responding) then the search continues with the
# next entry.
#
# Valid entries include:
#
#       nisplus                 Use NIS+ (NIS version 3)
#       nis                     Use NIS (NIS version 2), also called YP
#       dns                     Use DNS (Domain Name Service)
#       files                   Use the local files
#       db                      Use the local database (.db) files
#       compat                  Use NIS on compat mode
#       hesiod                  Use Hesiod for user lookups
#       [NOTFOUND=return]       Stop searching if not found so far
#

# To use db, put the "db" in front of "files" for entries you want to be
# looked up first in the databases
#
# Example:
#passwd:    db files nisplus nis
#shadow:    db files nisplus nis
#group:     db files nisplus nis

passwd:     files sss
shadow:     files sss
group:      files sss
#initgroups: files sss

#hosts:     db files nisplus nis dns
hosts:      files dns myhostname

# Example - obey only what nisplus tells us...
#services:   nisplus [NOTFOUND=return] files
#networks:   nisplus [NOTFOUND=return] files
#protocols:  nisplus [NOTFOUND=return] files
#rpc:        nisplus [NOTFOUND=return] files
#ethers:     nisplus [NOTFOUND=return] files
#netmasks:   nisplus [NOTFOUND=return] files

bootparams: nisplus [NOTFOUND=return] files

ethers:     files
netmasks:   files
networks:   files
protocols:  files
rpc:        files
services:   files sss

netgroup:   nisplus sss

publickey:  nisplus

automount:  files nisplus sss
aliases:    files nisplus

Edit /etc/pam.d/system-auth


#%PAM-1.0
# This file is auto-generated.
# User changes will be destroyed the next time authconfig is run.
auth        required      pam_env.so
auth        required      pam_faildelay.so delay=2000000
auth        [default=1 ignore=ignore success=ok] pam_succeed_if.so uid >= 1000 quiet
auth        [default=1 ignore=ignore success=ok] pam_localuser.so
auth        sufficient    pam_unix.so nullok try_first_pass
auth        requisite     pam_succeed_if.so uid >= 1000 quiet_success
auth        sufficient    pam_sss.so forward_pass
auth        required      pam_deny.so

account     required      pam_unix.so
account     sufficient    pam_localuser.so
account     sufficient    pam_succeed_if.so uid < 1000 quiet
account     [default=bad success=ok user_unknown=ignore] pam_sss.so
account     required      pam_permit.so

password    requisite     pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type=
password    sufficient    pam_unix.so sha512 shadow nullok try_first_pass use_authtok
password    sufficient    pam_sss.so use_authtok
password    required      pam_deny.so

session     optional      pam_keyinit.so revoke
session     required      pam_limits.so
-session     optional      pam_systemd.so
session     optional      pam_oddjob_mkhomedir.so umask=0077
session     [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid
session     required      pam_unix.so
session     optional      pam_sss.so

In Active Directory I created two AD groups:

RootUser: users in this group will have root permissions on the CentOS box.

NonRootUser: users in this group won't have sudo permissions.


Create file /etc/sssd/sssd.conf

Set valid permissions:

chmod 600 /etc/sssd/sssd.conf

Edit sssd.conf:


[sssd]
domains = test.com
config_file_version = 2
services = nss, pam

[domain/test.com]
ad_domain = test.com
krb5_realm = TEST.COM
realmd_tags = manages-system joined-with-samba
cache_credentials = True
id_provider = ad
krb5_store_password_if_offline = True
default_shell = /bin/bash
ldap_id_mapping = True
use_fully_qualified_names = False
fallback_homedir = /home/%u@%d
access_provider = simple
simple_allow_groups = RootUser,NonRootUser

Now it’s time to join the CentOS machine to the Active Directory domain:

realm join --user=Administrator test.com -v

[root@localhost sssd]# realm join --user=Administrator TEST.COM -v
* Resolving: _ldap._tcp.test.com
* Performing LDAP DSE lookup on: 172.17.174.90
* Successfully discovered: test.com
realm: Already joined to this domain

If you get the above output (realm: Already joined to this domain), try leaving the domain and joining again.

realm leave test.com
realm join --user=Administrator test.com -v

The CentOS computer object should now be visible in the default Computers container in Active Directory Users and Computers.
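After the join, restart SSSD so it picks up /etc/sssd/sssd.conf and verify that AD users and groups resolve (user1 is a sample account that is a member of one of the two groups):

systemctl enable sssd
systemctl restart sssd
# should return uid/gid and group membership pulled from AD
id user1
getent passwd user1
getent group RootUser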

Configure sudo permissions:

Edit the /etc/sudoers file (via visudo) in order to set the following permissions:

RootUser: users in this group will have root permissions on the CentOS box

NonRootUser: remove the right to run su

visudo
%test.com\\RootUser    ALL=(ALL)    ALL
%test.com\\NonRootUser    ALL=(ALL)    !/usr/bin/su

Log in to CentOS using Active Directory credentials:

Username: test\user1

Password: AD password

Now, if a user from the NonRootUser group tries to execute sudo su, they'll be denied.
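You can also check what sudo allows for a given account without logging in interactively (sudo -l prints the matching sudoers entries):

# as root, list what a specific AD user may run
sudo -l -U user1
# or as the logged-in AD user
sudo -l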



Elastic Stack (a collection of three open source projects: Elasticsearch, Logstash and Kibana) is a complete end-to-end log analysis solution which helps in searching, analyzing and visualizing the logs generated by different machines.

In this post we’ll install Elasticsearch, Logstash and Kibana on VM1.test.com, and Elasticsearch and Logstash on VM2.test.com. We’ll then search the data held by the Elasticsearch instance on VM2 from VM1, which is why we need to connect the Elasticsearch clusters on VM1 and VM2. These two clusters are independent and the connection is one-way (ES on VM1 will connect to and search data located on VM2).

Also, on VM1 we’ll install Filebeat (an agent for collecting data from VM1), which will send data to Logstash and then on to Elasticsearch.

We’ll also install Winlogbeat (an agent for Windows machines), which will send data to the VM2.test.com ES cluster.

VM1.TEST.COM: 192.168.74.37

VM2.TEST.COM: 192.168.74.45


Actions on VM1 and VM2:

Create ELK repository

cat >>/etc/yum.repos.d/elk.repo<<EOF
[ELK-6.x]
name=ELK repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF

Install Elasticsearch

Elasticsearch is the database where logs are stored. We’ll use the Search Guard plugin for ELK security; it’s a commercial solution but offers a free plugin for SSL security. At the time of this writing (30.03.2019), the latest Search Guard plugin supports ES version 6.6.2, so I’ll install that one in this example.

yum install epel-release
yum install elasticsearch-6.6.2

Install java:

yum install java-1.8.0-openjdk

On both machines edit /etc/elasticsearch/elasticsearch.yml

cluster.name: set a different name on each machine (arbitrary)

network.host: set the machine name (vm1.test.com)

and uncomment

http.port: 9200
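For example, on VM1 the relevant lines in /etc/elasticsearch/elasticsearch.yml could look like this (the cluster name is arbitrary, just keep it different on the two machines):

cluster.name: vm1-cluster
network.host: vm1.test.com
http.port: 9200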

Enable and start Elasticsearch, then check that the cluster is accessible

systemctl enable elasticsearch
systemctl start elasticsearch

On VM1

curl -X GET "vm1.test.com:9200"

on VM2

curl -X GET "vm2.test.com:9200"

Output should be like this:

{
  "name" : "4P1fXFO",
  "cluster_name" : "elasticsearch",
  "cluster_uuid" : "NcmuS7CyTHyUIQMcNcT3PA",
  "version" : {
    "number" : "6.6.2",
    "build_flavor" : "default",
    "build_type" : "rpm",
    "build_hash" : "3bd3e59",
    "build_date" : "2019-03-06T15:16:26.864148Z",
    "build_snapshot" : false,
    "lucene_version" : "7.6.0",
    "minimum_wire_compatibility_version" : "5.6.0",
    "minimum_index_compatibility_version" : "5.0.0"
  },
  "tagline" : "You Know, for Search"
}

Actions on VM1

Install Kibana, which will be used for data visualization, and Nginx

yum install kibana-6.6.2
yum install nginx

Because Kibana by default only accepts connections from the local machine, we’ll use Nginx as a reverse proxy to access the Kibana GUI from anywhere

Create file /etc/nginx/conf.d/kibana.conf

server {
listen 80;

server_name example.com www.example.com;

auth_basic "Restricted Access";
auth_basic_user_file /etc/nginx/htpasswd.users;

location / {
proxy_pass http://localhost:5601;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
}
}
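The config above references /etc/nginx/htpasswd.users for basic authentication; if you keep that enabled, create the file first (htpasswd is part of the httpd-tools package, kibanaadmin is an arbitrary user name):

yum install httpd-tools -y
htpasswd -c /etc/nginx/htpasswd.users kibanaadmin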

Start Kibana and Nginx, then test web access

systemctl enable kibana
systemctl start kibana
systemctl enable nginx
systemctl start nginx


Create SSL certificates

I used this great guide to create SSL certificates

Download and extract SSL Search Guard tools

wget https://search.maven.org/remotecontent?filepath=com/floragunn/search-guard-tlstool/1.6/search-guard-tlstool-1.6.tar.gz
tar xvzf remotecontent\?filepath\=com%2Ffloragunn%2Fsearch-guard-tlstool%2F1.6%2Fsearch-guard-tlstool-1.6.tar.gz

Create a config file for the TLS tool (test_cluster.yml)

###
### Self-generated certificate authority
###
#
# If you want to create a new certificate authority, you must specify its parameters here.
# You can skip this section if you only want to create CSRs
#
ca:
   root:
      # The distinguished name of this CA. You must specify a distinguished name.
      dn: CN=root.ca.test.com,OU=CA,O=BugBear.BG\, Ltd.,DC=BugBear,DC=com

      # The size of the generated key in bits
      keysize: 2048

      # The validity of the generated certificate in days from now
      validityDays: 3650

      # Password for private key
      #   Possible values:
      #   - auto: automatically generated password, returned in config output;
      #   - none: unencrypted private key;
      #   - other values: other values are used directly as password
      pkPassword: none

      # The name of the generated files can be changed here
      file: root-ca.pem

   # If you want to use an intermediate certificate as signing certificate,
   # please specify its parameters here. This is optional. If you remove this section,
   # the root certificate will be used for signing.
   intermediate:
      # The distinguished name of this CA. You must specify a distinguished name.
      dn: CN=signing.ca.test.com,OU=CA,O=BugBear.BG\, Ltd.,DC=BugBear,DC=com

      # The size of the generated key in bits
      keysize: 2048

      # The validity of the generated certificate in days from now
      validityDays: 3650

      pkPassword: none

      # If you have a certificate revocation list, you can specify its distribution points here
      crlDistributionPoints: URI:https://raw.githubusercontent.com/floragunncom/unittest-assets/master/revoked.crl

###
### Default values and global settings
###
defaults:

      # The validity of the generated certificate in days from now
      validityDays: 3650

      # Password for private key
      #   Possible values:
      #   - auto: automatically generated password, returned in config output;
      #   - none: unencrypted private key;
      #   - other values: other values are used directly as password
      pkPassword: none

      # Specifies to recognize legitimate nodes by the distinguished names
      # of the certificates. This can be a list of DNs, which can contain wildcards.
      # Furthermore, it is possible to specify regular expressions by
      # enclosing the DN in //.
      # Specification of this is optional. The tool will always include
      # the DNs of the nodes specified in the nodes section.
      #nodesDn:
      #- "CN=*.example.com,OU=Ops,O=Example Com\\, Inc.,DC=example,DC=com"
      # - 'CN=node.other.com,OU=SSL,O=Test,L=Test,C=DE'
      # - 'CN=*.example.com,OU=SSL,O=Test,L=Test,C=DE'
      # - 'CN=elk-devcluster*'
      # - '/CN=.*regex/'

      # If you want to use OIDs to mark legitimate node certificates,
      # the OID can be included in the certificates by specifying the following
      # attribute

      # nodeOid: "1.2.3.4.5.5"

      # The length of auto generated passwords
      generatedPasswordLength: 12

      # Set this to true in order to generate config and certificates for
      # the HTTP interface of nodes
      httpsEnabled: true

      # Set this to true in order to re-use the node transport certificates
      # for the HTTP interfaces. Only recognized if httpsEnabled is true

      # reuseTransportCertificatesForHttp: false

      # Set this to true to enable hostname verification
      #verifyHostnames: false

      # Set this to true to resolve hostnames
      #resolveHostnames: false

###
### Nodes
###
#
# Specify the nodes of your ES cluster here
#
nodes:
  - name: node1
    dn: CN=vm1.test.com,OU=Ops,O=BugBear BG\, Ltd.,DC=BugBear,DC=com
    dns:
      - vm1.test.com
    ip:
      - 192.168.74.37

  - name: node2
    dn: CN=vm2.test.com,OU=Ops,O=BugBear BG\, Ltd.,DC=BugBear,DC=com
    dns:
      - vm2.test.com
    ip:
      - 192.168.74.45

###
### Clients
###
#
# Specify the clients that shall access your ES cluster with certificate authentication here
#
# At least one client must be an admin user (i.e., a super-user). Admin users can
# be specified with the attribute admin: true
#
clients:
  - name: admin
    dn: CN=admin.test.com,OU=Ops,O=BugBear Com\, Inc.,DC=example,DC=com
    admin: true

Create certificates (on VM1):

cd tools/
 
# Generate new signing authority
./sgtlstool.sh -c ../config/test_cluster.yml -v -ca
 
# Generate CSR's for node + admin certs
./sgtlstool.sh -c ../config/test_cluster.yml -v -csr
 
# Generate cert/keys
./sgtlstool.sh -f -o -c ../config/test_cluster.yml -v -crt

On both machines:

yum install logstash
mkdir /etc/elasticsearch/ssl
mkdir /etc/logstash/ssl

On VM1:

cd out
yum install filebeat
cp node1.pem node1.key /etc/nginx/
cp node1.pem node1.key root-ca.pem /etc/logstash/ssl
cp root-ca.pem /etc/pki/tls/certs
cp root-ca.pem node1.key node1.pem node1_http.pem node1_http.key admin.key admin.pem /etc/elasticsearch/ssl
chown elasticsearch:elasticsearch /etc/elasticsearch/ssl
chown logstash:logstash /etc/logstash/ssl
# copy files to vm2.test.com
scp node2.key node2.pem root-ca.pem root@vm2.test.com:/etc/logstash/ssl
scp node2.pem node2.key root-ca.pem node2_http.key node2_http.pem admin.key admin.pem root@vm2.test.com:/etc/elasticsearch/ssl/

Disable cluster shard allocation

curl -Ss -XPUT 'http://vm1.test.com:9200/_cluster/settings?pretty' -H 'Content-Type: application/json' -d '{"persistent":{"cluster.routing.allocation.enable": "none" }}'

Check which search-guard plugin version you need to install

Detect your Elasticsearch version and download the matching Search Guard version, as shown below.
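A quick way to confirm the installed Elasticsearch version before picking the plugin build:

rpm -q elasticsearch
# or ask the running cluster (jq is optional, it is installed later anyway)
curl -s "vm1.test.com:9200" | jq -r .version.number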

Stop the Elasticsearch cluster and install the Search Guard plugin

systemctl stop elasticsearch
/usr/share/elasticsearch/bin/elasticsearch-plugin install -b com.floragunn:search-guard-6:6.6.2-24.2

Add the following lines to /etc/elasticsearch/elasticsearch.yml

xpack.security.enabled: false
searchguard.enterprise_modules_enabled: false
searchguard.ssl.transport.pemcert_filepath: ssl/node1.pem
searchguard.ssl.transport.pemkey_filepath: ssl/node1.key
searchguard.ssl.transport.pemtrustedcas_filepath: ssl/root-ca.pem
searchguard.ssl.transport.enforce_hostname_verification: false
searchguard.ssl.transport.resolve_hostname: false
searchguard.ssl.http.enabled: true
searchguard.ssl.http.pemcert_filepath: ssl/node1_http.pem
searchguard.ssl.http.pemkey_filepath: ssl/node1_http.key
searchguard.ssl.http.pemtrustedcas_filepath: ssl/root-ca.pem
searchguard.nodes_dn:
- CN=vm1.test.com,OU=Ops,O=BugBear BG\, Ltd.,DC=BugBear,DC=com
- CN=vm2.test.com,OU=Ops,O=BugBear BG\, Ltd.,DC=BugBear,DC=com
searchguard.authcz.admin_dn:
- CN=admin.test.com,OU=Ops,O=BugBear Com\, Inc.,DC=example,DC=com

Allow logstash role to create indexes

vi /usr/share/elasticsearch/plugins/search-guard-6/sgconfig/sg_roles.yml

edit as below:

sg_logstash:
  cluster:
    - CLUSTER_MONITOR
    - CLUSTER_COMPOSITE_OPS
    - indices:admin/template/get
    - indices:admin/template/put
  indices:
    '*':
      '*':
        - CRUD
        - CREATE_INDEX
    '*beat*':
      '*':
        - CRUD
        - CREATE_INDEX

Start Elasticsearch cluster and enable shard allocation

cd /usr/share/elasticsearch/plugins/search-guard-6/tools/

systemctl start elasticsearch
# Re-enable cluster shard allocation
bash sgadmin.sh --enable-shard-allocation -key /etc/elasticsearch/ssl/admin.key -cert /etc/elasticsearch/ssl/admin.pem -cacert /etc/elasticsearch/ssl/root-ca.pem -icl -nhnv -h vm1.test.com
systemctl restart elasticsearch

The default Search Guard username/password is admin:admin. To change it, run the following and enter the new password when prompted:

bash /usr/share/elasticsearch/plugins/search-guard-6/tools/hash.sh

Copy the hash and replace the old one in /usr/share/elasticsearch/plugins/search-guard-6/sgconfig/sg_internal_users.yml

For these changes to take effect (the updated password and the logstash role changes) run the sgadmin tool

cd /usr/share/elasticsearch/plugins/search-guard-6/tools
bash sgadmin.sh -cd /usr/share/elasticsearch/plugins/search-guard-6/sgconfig -icl -key /etc/elasticsearch/ssl/admin.key -cert /etc/elasticsearch/ssl/admin.pem -cacert /etc/elasticsearch/ssl/root-ca.pem -nhnv -h vm1.test.com

Check access:

yum install jq
curl -Ss -k https://admin:admin@vm1.test.com:9200/_cluster/health | jq

In case of any errors, check the logs under /var/log/elasticsearch/.


Now that our cluster is secured, we need to configure Kibana and Nginx to use SSL

cat /etc/nginx/conf.d/kibana.conf
server {
     listen 80;
     server_name vm1.test.com; # Replace it with your Subdomain
     return 301 https://$host$request_uri;
}

server {
    listen *:443 ssl;
    server_name vm1.test.com;
    access_log /var/log/nginx/ekl.access.log;
    ssl_certificate /etc/nginx/node1.pem;
    ssl_certificate_key /etc/nginx/node1.key;
    ssl on;
    ssl_session_cache builtin:1000 shared:SSL:10m;
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers HIGH:!aNULL:!eNULL:!EXPORT:!CAMELLIA:!DES:!MD5:!PSK:!RC4;
    ssl_prefer_server_ciphers on;

       location / {

        #auth_basic "Restricted Content";
        #auth_basic_user_file /etc/nginx/conf.d/htpasswd.users;
        proxy_pass http://localhost:5601;
        proxy_redirect http://localhost:5601 https://vm1.test.com;

   }
}
cat /etc/kibana/kibana.yml
elasticsearch.hosts: ["https://vm1.test.com:9200"]
elasticsearch.ssl.verificationMode: none
elasticsearch.username: "admin"
elasticsearch.password: "admin"

Restart Nginx and Kibana and browse to http://vm1.test.com; you should be redirected to HTTPS:


Enter the username/password provided by Search Guard (or the new password if you changed it).


You should be able to see the Kibana web page.

Configuring logstash

Create a simple config file for pushing data to Elasticsearch:

cat /etc/logstash/conf.d/example.conf

input {
  beats {
    port => 5044
    ssl => true
    ssl_certificate => "/etc/logstash/ssl/node1.pem"
    ssl_key => "/etc/logstash/ssl/node1.key"
  }
}

#filter {
#    if [type] == "syslog" {
#        grok {
#            match => {
#                "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}"
#            }
#            add_field => [ "received_at", "%{@timestamp}" ]
#            add_field => [ "received_from", "%{host}" ]
#        }
#        syslog_pri { }
#        date {
#            match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
#        }
#    }
#}

output {
    elasticsearch {
        hosts => "vm1.test.com:9200"
        index => "vm1-%{+YYYY.MM.dd}"
        user => logstash
        password => "logstash"
        ssl => true
        ssl_certificate_verification => true
        cacert => "/etc/logstash/ssl/root-ca.pem"

    }
}
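Before starting the service you can ask Logstash to validate the pipeline configuration (this only checks the syntax, it doesn't send any events):

/usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /etc/logstash/conf.d/example.conf --config.test_and_exit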

Configure Filebeat on VM1

Filebeat is agent software that collects data from a client machine and can send it to Logstash or Elasticsearch; in this example it is sent to Logstash. Filebeat was installed in one of the previous steps.

Edit /etc/filebeat/filebeat.yml (in this example only /var/log/audit/audit.log is collected) and comment out the elasticsearch output:

  - type: log

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/audit/audit.log

#----------------------------- Logstash output --------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["vm1.test.com:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  ssl.certificate_authorities: ["/etc/pki/tls/certs/root-ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"
systemctl enable filebeat
systemctl start filebeat
systemctl start logstash
systemctl enable logstash

Check Filebeat for errors:

filebeat -e -c /etc/filebeat/filebeat.yml

In case of errors check /var/log/logstash/logstash-plain.log

If all is fine, you should see data in Kibana under Index Management.


In order to search it, go to Management -> Kibana -> Index Patterns


Create index pattern



Go to Discover and select a filter.


Select a time range and view the report.


Now that we have secured VM1 and copied the certificates to VM2, we need to configure VM2 too.

Configuring VM2.test.com

Set file permissions, disable shard allocation, stop the cluster and install Search Guard

chown elasticsearch:elasticsearch /etc/elasticsearch/ssl
chown logstash:logstash /etc/logstash/ssl
systemctl stop elasticsearch
/usr/share/elasticsearch/bin/elasticsearch-plugin install -b com.floragunn:search-guard-6:6.6.2-24.2

edit /etc/elasticsearch/elasticsearch.yml

xpack.security.enabled: false
searchguard.enterprise_modules_enabled: false
searchguard.ssl.transport.pemcert_filepath: ssl/node2.pem
searchguard.ssl.transport.pemkey_filepath: ssl/node2.key
searchguard.ssl.transport.pemtrustedcas_filepath: ssl/root-ca.pem
searchguard.ssl.transport.enforce_hostname_verification: false
searchguard.ssl.transport.resolve_hostname: false
searchguard.ssl.http.enabled: true
searchguard.ssl.http.pemcert_filepath: ssl/node2_http.pem
searchguard.ssl.http.pemkey_filepath: ssl/node2_http.key
searchguard.ssl.http.pemtrustedcas_filepath: ssl/root-ca.pem
searchguard.nodes_dn:
- CN=vm1.test.com,OU=Ops,O=BugBear BG\, Ltd.,DC=BugBear,DC=com
- CN=vm2.test.com,OU=Ops,O=BugBear BG\, Ltd.,DC=BugBear,DC=com
searchguard.authcz.admin_dn:
- CN=admin.test.com,OU=Ops,O=BugBear Com\, Inc.,DC=example,DC=com

Allow logstash role to create indexes

vi /usr/share/elasticsearch/plugins/search-guard-6/sgconfig/sg_roles.yml

edit as below:

sg_logstash:
  cluster:
    - CLUSTER_MONITOR
    - CLUSTER_COMPOSITE_OPS
    - indices:admin/template/get
    - indices:admin/template/put
  indices:
    '*':
      '*':
        - CRUD
        - CREATE_INDEX
    '*beat*':
      '*':
        - CRUD
        - CREATE_INDEX

Start Elasticsearch cluster and enable shard allocation

cd /usr/share/elasticsearch/plugins/search-guard-6/tools/

systemctl start elasticsearch
# Re-enable cluster shard allocation
bash sgadmin.sh --enable-shard-allocation -key /etc/elasticsearch/ssl/admin.key -cert /etc/elasticsearch/ssl/admin.pem -cacert /etc/elasticsearch/ssl/root-ca.pem -icl -nhnv -h vm2.test.com
systemctl restart elasticsearch

The default Search Guard username/password is admin:admin. To change it, run the following and enter the new password when prompted:

bash /usr/share/elasticsearch/plugins/search-guard-6/tools/hash.sh

Copy the hash and replace the old one in /usr/share/elasticsearch/plugins/search-guard-6/sgconfig/sg_internal_users.yml

For these changes to take effect (the updated password and the logstash role changes) run the sgadmin tool

cd /usr/share/elasticsearch/plugins/search-guard-6/tools
bash sgadmin.sh -cd /usr/share/elasticsearch/plugins/search-guard-6/sgconfig -icl -key /etc/elasticsearch/ssl/admin.key -cert /etc/elasticsearch/ssl/admin.pem -cacert /etc/elasticsearch/ssl/root-ca.pem -nhnv -h vm2.test.com
systemctl restart elasticsearch

Check access:

yum install jq
curl -Ss -k https://admin:admin@vm2.test.com:9200/_cluster/health | jq

Configuring logstash

The Logstash input is the Windows event log (received from Winlogbeat via the beats input) and the output is Elasticsearch.

cat /etc/logstash/conf.d/events.conf

input {
    beats {
    port => 5044
    ssl => true
    ssl_certificate => "/etc/logstash/ssl/node2.pem"
    ssl_key => "/etc/logstash/ssl/node2.key"

  }
}

output {
    elasticsearch {
        hosts => ["https://vm2.test.com:9200"]
        index => "client02-eventviewer-%{+YYYY.MM.dd}"
        user => logstash
        password => "logstash"
        ssl => true
        ssl_certificate_verification => true
        cacert => "/etc/logstash/ssl/root-ca.pem"

    }
}

Start logstash

systemctl enable logstash
systemctl start logstash

Installing Winlogbeat (ELK agent) on Windows 10

Winlogbeat collects event log data from the Windows machine

Unzip the file, move it to C:\Program Files, copy the content of root-ca.pem to this folder, then install the agent and configure it

Set-ExecutionPolicy Unrestricted
.\install-service-winlogbeat.ps1

C:\Program Files\winlogbeat\winlogbeat.yml

winlogbeat.event_logs:
  - name: Application
    ignore_older: 72h
  - name: Security
  - name: System
winlogbeat.registry_file: C:/ProgramData/winlogbeat/.winlogbeat.yml

output.logstash:
  # The Logstash hosts
  hosts: ["vm2.test.com:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  ssl.certificate_authorities: ["C:/Program Files/winlogbeat/root-ca.pem"]

logging.to_files: true
logging.files:
  path: "C:/Program Files/winlogbeat/Logs/"
logging.level: error

If SSL is enabled, copy the content of root-ca.pem to the C:\Program Files\winlogbeat\root-ca.pem file


Check config for errors:

.\winlogbeat.exe test config -c .\winlogbeat.yml -e

Start the Winlogbeat service (Start-Service winlogbeat from an elevated PowerShell prompt)


Check logs in C:\Program Files\winlogbeat\Logs

Creating a Remote Cluster and Cross-Cluster Search

The remote clusters module enables you to establish unidirectional connections to a remote cluster. It allows any node to act as a federated client across multiple clusters, but only for searching the remote cluster, using a feature called cross-cluster search. Windows 10 event logs are sent to the VM2 cluster and we'll query that data from the VM1 cluster.

On VM2 add following lines to /etc/elasticsearch/elasticsearch.yml

http.cors.enabled: true
http.cors.allow-origin: "*"

Make sure that you can reach port 9300 on VM2 from VM1 (for example with telnet), as shown below.
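For example (open the port on VM2 first if firewalld is running; a sketch assuming firewalld):

# on VM2: open the transport port if firewalld is enabled
firewall-cmd --permanent --add-port=9300/tcp
firewall-cmd --reload
# on VM1: check that the transport port on VM2 is reachable
telnet vm2.test.com 9300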

On VM1, in Kibana click Management -> Remote Clusters


Specify vm2.test.com:9300 as the seed node.
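The same registration can be done without the UI through the cluster settings API (a sketch; the remote cluster alias vm2 is arbitrary and admin:admin are the Search Guard credentials used earlier):

curl -k -u admin:admin -XPUT "https://vm1.test.com:9200/_cluster/settings" -H 'Content-Type: application/json' -d '
{
  "persistent": {
    "cluster.remote.vm2.seeds": ["vm2.test.com:9300"]
  }
}'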



Searching remote cluster

Go to Management -> Index Patterns -> Create index pattern and type clustername:indexname in order to search the remote cluster.
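The same clustername:indexname syntax works for a direct query against VM1 (a sketch, assuming the remote cluster was registered under the alias vm2 and the Winlogbeat index from VM2 is queried):

curl -k -u admin:admin "https://vm1.test.com:9200/vm2:client02-eventviewer-*/_search?size=1&pretty"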


Adding node to cluster

In this example I’ll add one node to an existing cluster.

master: ekl.test.com (make sure 127.0.0.1 is not bound to ekl.test.com)

node: ekl1.test1.com (make sure 127.0.0.1 is not bound to ekl1.test1.com)

master:

cluster.name: client1
searchguard.enterprise_modules_enabled: false
node.name: ekl.test.com
node.master: true
node.data: true
node.ingest: true

node:

cluster.name: client1
searchguard.enterprise_modules_enabled: false
node.name: ekl1.test1.com
node.master: true
node.data: true
node.ingest: true
discovery.zen.ping.unicast.hosts: ["ekl.test.com:9300", "ekl1.test1.com:9300"]
discovery.zen.minimum_master_nodes: 1
transport.tcp.port: 9300
transport.host: ekl1.test1.com
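Once both nodes are restarted you can confirm that they have formed a single cluster (a sketch; admin:admin are the Search Guard credentials, adjust if you changed them):

curl -k -u admin:admin "https://ekl.test.com:9200/_cat/nodes?v"
curl -k -u admin:admin "https://ekl.test.com:9200/_cluster/health?pretty"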

Getting logs from syslog devices

Fortigate configuration

Log in to the Fortigate, open the CLI and type:

config log syslogd setting
    set status enable
    set server "logstash server IP"
    set port 5044
end

cat /etc/logstash/conf.d/fortigate.conf

input {
udp {
port => 5044
type => firewall
}
}

filter {

if [type] == "firewall" {
        mutate {
                add_tag => ["fortigate"]
                        }
        grok {
            break_on_match => false
                match => ["message", "%{SYSLOG5424PRI:syslog_index}%{GREEDYDATA:message}"]
                overwrite => [ "message" ]
                tag_on_failure => [ "failure_grok_fortigate" ]
        }
                kv { }
        if [msg] {
                mutate {
                        replace => [ "message", "%{msg}" ]
                }
        }
        mutate {
                convert => { "duration" => "integer" }
                convert => { "rcvdbyte" => "integer" }
                convert => { "rcvdpkt" => "integer" }
                convert => { "sentbyte" => "integer" }
                convert => { "sentpkt" => "integer" }
                convert => { "cpu" => "integer" }
                convert => { "disk" => "integer" }
                convert => { "disklograte" => "integer" }
                convert => { "fazlograte" => "integer" }
                convert => { "mem" => "integer" }
                convert => { "totalsession" => "integer" }
        }
        mutate {
                add_field => ["logTimestamp", "%{date} %{time}"]
                add_field => ["loglevel", "%{level}"]
                replace => [ "fortigate_type", "%{type}"]
                replace => [ "fortigate_subtype", "%{subtype}"]
                remove_field => [ "msg", "type", "level", "date", "time" ]
        }
        date {
                locale => "en"
                match => ["logTimestamp", "YYYY-MM-dd HH:mm:ss"]
                remove_field => ["logTimestamp", "year", "month", "day", "time", "date"]
                add_field => ["type", "syslog"]
        }
        if [status] == "clash" {

                grok {
                        match => { "new_status" => "state=%{GREEDYDATA:new_status_state1} tuple-num=%{GREEDYDATA:new_status_tuple-num1} policyid=%{GREEDYDATA:new_status_policyid1} identidx=%{GREEDYDATA:new_status_identidx1} dir=%{GREEDYDATA:new_status_dir1} act=%{GREEDYDATA:new_status_act1} hook=%{GREEDYDATA:new_status_hook1} dir=%{GREEDYDATA:new_status_dir2} act=%{GREEDYDATA:new_status_act2} hook=%{GREEDYDATA:new_status_hook2} dir=%{GREEDYDATA:new_status_dir3} act=%{GREEDYDATA:new_status_act3} hook=%{GREEDYDATA:new_status_hook3}" }
                }
                grok {
                        match => { "old_status" => "state=%{GREEDYDATA:old_status_state1} tuple-num=%{GREEDYDATA:old_status_tuple-num1} policyid=%{GREEDYDATA:old_status_policyid1} identidx=%{GREEDYDATA:old_status_identidx1} dir=%{GREEDYDATA:old_status_dir1} act=%{GREEDYDATA:old_status_act1} hook=%{GREEDYDATA:old_status_hook1} dir=%{GREEDYDATA:old_status_dir2} act=%{GREEDYDATA:old_status_act2} hook=%{GREEDYDATA:old_status_hook2} dir=%{GREEDYDATA:old_status_dir3} act=%{GREEDYDATA:old_status_act3} hook=%{GREEDYDATA:old_status_hook3}" }
                }
        }
}

}

output {
    elasticsearch {
        hosts => ["ekl1.test1.com:9200"]
        index => "client02-fortigate-%{+YYYY.MM.dd}"
        user => logstash
        password => "logstash"
        ssl => true
        ssl_certificate_verification => true
        cacert => "/etc/logstash/ssl/root-ca.pem"

    }
}
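Restart Logstash so it picks up the new pipeline and, if firewalld is running, allow the syslog traffic from the Fortigate (a sketch assuming firewalld and UDP 5044 as configured above):

firewall-cmd --permanent --add-port=5044/udp
firewall-cmd --reload
systemctl restart logstash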

Active Directory (LDAP) authentication

For this a Search Guard Enterprise license is required. Change

searchguard.enterprise_modules_enabled: false

to

searchguard.enterprise_modules_enabled: true

in /etc/elasticsearch/elasticsearch.yml


In this example the service account used for searching Active Directory (service) is located in the service_accounts OU.

Users who need to access Elasticsearch are located in the UA OU, and the AD group test (role) is used to grant access to Elasticsearch. So all users who need to access Elasticsearch/Kibana need to be put in the test AD group.

cd /usr/share/elasticsearch/plugins/search-guard-6/sgconfig/

edit sg_config.yml

searchguard:
  dynamic:
    # Set filtered_alias_mode to 'disallow' to forbid more than 2 filtered aliases per index
    # Set filtered_alias_mode to 'warn' to allow more than 2 filtered aliases per index but warns about it (default)
    # Set filtered_alias_mode to 'nowarn' to allow more than 2 filtered aliases per index silently
    #filtered_alias_mode: warn
    kibana:
      # Kibana multitenancy - NOT FREE FOR COMMERCIAL USE
      # see https://github.com/floragunncom/search-guard-docs/blob/master/multitenancy.md
      # To make this work you need to install https://github.com/floragunncom/search-guard-module-kibana-multitenancy/wiki
      #multitenancy_enabled: true
      #server_username: kibanaserver
      #index: '.kibana'
      #do_not_fail_on_forbidden: false
    http:
      anonymous_auth_enabled: false
      xff:
        enabled: false
        internalProxies: '192\.168\.0\.10|192\.168\.0\.11' # regex pattern
        #internalProxies: '.*' # trust all internal proxies, regex pattern
        remoteIpHeader:  'x-forwarded-for'
        proxiesHeader:   'x-forwarded-by'
        #trustedProxies: '.*' # trust all external proxies, regex pattern
        ###### see https://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html for regex help
        ###### more information about XFF https://en.wikipedia.org/wiki/X-Forwarded-For
        ###### and here https://tools.ietf.org/html/rfc7239
        ###### and https://tomcat.apache.org/tomcat-8.0-doc/config/valve.html#Remote_IP_Valve
    authc:
      kerberos_auth_domain:
        http_enabled: false
        transport_enabled: false
        order: 6
        http_authenticator:
          type: kerberos # NOT FREE FOR COMMERCIAL USE
          challenge: true
          config:
            # If true a lot of kerberos/security related debugging output will be logged to standard out
            krb_debug: false
            # If true then the realm will be stripped from the user name
            strip_realm_from_principal: true
        authentication_backend:
          type: noop
      basic_internal_auth_domain:
        http_enabled: true
        transport_enabled: true
        order: 4
        http_authenticator:
          type: basic
          challenge: true
        authentication_backend:
          type: intern
      proxy_auth_domain:
        http_enabled: false
        transport_enabled: false
        order: 3
        http_authenticator:
          type: proxy
          challenge: false
          config:
            user_header: "x-proxy-user"
            roles_header: "x-proxy-roles"
        authentication_backend:
          type: noop
      jwt_auth_domain:
        http_enabled: false
        transport_enabled: false
        order: 0
        http_authenticator:
          type: jwt
          challenge: false
          config:
            signing_key: "base64 encoded HMAC key or public RSA/ECDSA pem key"
            jwt_header: "Authorization"
            jwt_url_parameter: null
            roles_key: null
            subject_key: null
        authentication_backend:
          type: noop
      clientcert_auth_domain:
        http_enabled: false
        transport_enabled: false
        order: 2
        http_authenticator:
          type: clientcert
          config:
            username_attribute: cn #optional, if omitted DN becomes username
          challenge: false
        authentication_backend:
          type: noop
      ldap:
        http_enabled: true
        transport_enabled: true
        order: 2
        http_authenticator:
          type: basic
          challenge: false
        authentication_backend:
          # LDAP authentication backend (authenticate users against a LDAP or Active Directory)
          type: ldap # NOT FREE FOR COMMERCIAL USE
          config:
            # enable ldaps
            enable_ssl: false
            # enable start tls, enable_ssl should be false
            enable_start_tls: false
            # send client certificate
            enable_ssl_client_auth: false
            # verify ldap hostname
            verify_hostnames: true
            hosts:
              - dc.test.com:389
            bind_dn: "CN=service,OU=service_accounts,DC=test,DC=com"
            password: "Pass"
            userbase: "OU=UA,DC=test,DC=com"
            # Filter to search for users (currently in the whole subtree beneath userbase)
            # {0} is substituted with the username
            usersearch: "(cn={0})"
            # Use this attribute from the user as username (if not set then DN is used)
            username_attribute: "cn"
    authz:
      roles_from_myldap:
        http_enabled: true
        transport_enabled: true
        authorization_backend:
          # LDAP authorization backend (gather roles from a LDAP or Active Directory, you have to configure the above LDAP authentication backend settings too)
          type: ldap # NOT FREE FOR COMMERCIAL USE
          config:
            # enable ldaps
            enable_ssl: false
            # enable start tls, enable_ssl should be false
            enable_start_tls: false
            # send client certificate
            enable_ssl_client_auth: false
            # verify ldap hostname
            verify_hostnames: true
            hosts:
              - "dc.test.com:389"
            bind_dn: "CN=service,OU=service_accounts,DC=test,DC=com"
            password: "Pass"
            #rolebase: "OU=UA,DC=test,DC=com"
            rolebase: "OU=groups,DC=test,DC=com"
            # Filter to search for roles (currently in the whole subtree beneath rolebase)
            # {0} is substituted with the DN of the user
            # {1} is substituted with the username
            # {2} is substituted with an attribute value from user's directory entry, of the authenticated user. Use userroleattribute to specify the name of the attribute
            rolesearch: "(uniqueMember={0})"
            #rolesearch: "(member={2})"
            # Specify the name of the attribute which value should be substituted with {2} above
            userroleattribute: null
            # Roles as an attribute of the user entry
            #userrolename: disabled
            userrolename: "memberOf"
            # The attribute in a role entry containing the name of that role, Default is "name".
            # Can also be "dn" to use the full DN as rolename.
            rolename: "CN"
            # Resolve nested roles transitive (roles which are members of other roles and so on ...)
            resolve_nested_roles: "true"
            userbase: 'OU=groups,DC=test,DC=com'
            #userbase: "OU=UA,DC=test,DC=com"
            # Filter to search for users (currently in the whole subtree beneath userbase)
            # {0} is substituted with the username
            #usersearch: "(cn={0})"
            usersearch: "(uid={0})"
            # Skip users matching a user name, a wildcard or a regex pattern
            #skip_users:
            #  - 'cn=Michael Jackson,ou*people,o=TEST'
            #  - '/\S*/'
      roles_from_another_ldap:
        enabled: false
        authorization_backend:
          type: ldap # NOT FREE FOR COMMERCIAL USE
          #config goes here ...

Edit sg_roles_mapping.yml

sg_ad_admin:
  readonly: true
  backendroles:
    - test

Edit sg_roles.yml

sg_ad_admin:
  readonly: true
  cluster:
    - UNLIMITED
  indices:
    '*':
      '*':
        - UNLIMITED
  tenants:
    admin_tenant: RW

Restart the Elasticsearch service and run the sgadmin tool

systemctl restart elasticsearch
bash /usr/share/elasticsearch/plugins/search-guard-6/tools/sgadmin.sh -cd /usr/share/elasticsearch/plugins/search-guard-6/sgconfig -icl -key /etc/elasticsearch/ssl/admin.key -cert /etc/elasticsearch/ssl/admin.pem -cacert /etc/elasticsearch/ssl/root-ca.pem -nhnv -h vm1.test.com

You should now be able to log in to Kibana using Active Directory credentials

Get AWS IAM reports – Python script

Posted: February 26, 2019 in Linux, Scripts

The script below runs in a Docker container and collects IAM users, group memberships and the IAM policies assigned to each user. It creates HTML files from the CSV reports, checks whether there are any differences between the old and new files and, if there are, writes the changes to a separate file and sends the HTML files as the email body. Finally, it queries CloudTrail to check whether IAM policies have changed in the last 24 hours; if so, it sends a separate email to a specific mailbox. Jira monitors that mailbox and a ticket is created automatically from it.

start.py

#!/usr/bin/python3

import boto3
import json
import csv
import sys
import os
import shutil
import pandas as pd
import subprocess
import smtplib
import argparse
import time
from bson import json_util
from shutil import copyfile
from os.path import basename
from smtplib import SMTP
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.header import Header
from email.utils import parseaddr, formataddr
from base64 import encodebytes

###########################

#Input variables

##############################

parser = argparse.ArgumentParser()
parser.add_argument('-aws_access_key_id', dest='aws_access_key_id', help='AWS Access Key ID.')
parser.add_argument('-aws_secret_access_key', dest='aws_secret_access_key', help='AWS Secret access key.')
parser.add_argument('-html_body_recipient', dest='html_body_recipient', help='Recipient of email with HTML files as body.')
parser.add_argument('-jira_email', dest='jira_email', help='JIRA mailbox for automatic ticket creation.')
parser.add_argument('-support_email', dest='support_email', help='Support mailbox.')
args = parser.parse_args()

aws_access_key_id = args.aws_access_key_id
aws_secret_access_key = args.aws_secret_access_key

html_body_recipient = args.html_body_recipient

jira_email = args.jira_email

support_email = args.support_email

os.environ['AWS_ACCESS_KEY_ID'] = aws_access_key_id

os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key

os.environ['AWS_DEFAULT_REGION'] = "eu-west-1"

def read_files(file_list):
            data = ''
            for filename in file_list:
                with open(filename) as file:
                    data += file.read()
            return data

def send_email(recipients=[jira_email],
         subject="AWS IAM Role Changes",
         body="Dear colleagues,\nplease see attached files for recent changes in report",
         zipfiles=['/media/company/company_role_assignemnt_changes.txt', '/media/company/cloudtrail.csv'],
         server="localhost",
         sender="Rundeck ",
         replyto="=?ISO-8859-1?Q?M=F8=F8=F8?= "): #: bool
    """Sends an e-mail"""
    to = ",".join(recipients)
    charset = "utf-8"
    # Testing if body can be encoded with the charset
    try:
        body.encode(charset)
    except UnicodeEncodeError:
        print("Could not encode " + body + " as " + charset + ".")
        return False

    # Split real name (which is optional) and email address parts
    sender_name, sender_addr = parseaddr(sender)
    replyto_name, replyto_addr = parseaddr(replyto)

    sender_name = str(Header(sender_name, charset))
    replyto_name = str(Header(replyto_name, charset))

    # Create the message ('plain' stands for Content-Type: text/plain)
    try:
        msgtext = MIMEText(body.encode(charset), 'plain', charset)
    except TypeError:
        print("MIMEText fail")
        return False

    msg = MIMEMultipart()

    msg['From'] = formataddr((sender_name, sender_addr))
    msg['To'] = to #formataddr((recipient_name, recipient_addr))
    msg['Reply-to'] = formataddr((replyto_name, replyto_addr))
    msg['Subject'] = Header(subject, charset)
    msg['CC'] = support_email
    msg.attach(msgtext)

    for zipfile in zipfiles:
        part = MIMEBase('application', "zip")
        b = open(zipfile, "rb").read()
        # Convert from bytes to a base64-encoded ascii string
        bs = encodebytes(b).decode()
        # Add the ascii-string to the payload
        part.set_payload(bs)
        # Tell the e-mail client that we're using base 64
        part.add_header('Content-Transfer-Encoding', 'base64')
        part.add_header('Content-Disposition', 'attachment; filename="%s"' %
                        os.path.basename(zipfile))
        msg.attach(part)

    s = SMTP()
    try:
        s.connect(server)
    except:
        print("Could not connect to smtp server: " + server)
        return False

        print("Sending the e-mail")
    s.sendmail(sender, recipients, msg.as_string())
    s.quit()
    return True

#def main():
    #send_email()

#if __name__ == "__main__":
    #main()

def send_html_body(header, body):
           msg = MIMEText(body, 'html')  # second parameter is MIME type
           msg['Subject'] = header['Subject']
           msg['From'] = header['From']
           msg['To'] = header['To']
           s = smtplib.SMTP('localhost')
           s.send_message(msg)
           s.quit()

###########################

#start postfix service

###########################

os.system("service postfix start")

filename="/media/company/changes.zip"

if os.path.exists(filename):
    os.remove(filename)

################################################################################

#Make a backup of yesterday's reports

####################################################################################

shutil.copy2('/media/company/company_users.csv', '/media/company/company_users_old.csv')
shutil.copy2('/media/company/company_groups.csv', '/media/company/company_groups_old.csv')
shutil.copy2('/media/company/company_users_policies.csv', '/media/company/company_users_policies_old.csv')
shutil.copy2('/media/company/company_group_policies.csv',  '/media/company/company_group_policies_old.csv')
shutil.copy2('/media/company/company_role_policies.csv', '/media/company/company_role_policies_old.csv')
shutil.copy2('/media/company/company_role_assignment.csv', '/media/company/company_role_assignment_old.csv')
shutil.copy2('/media/company/roles_assign.html', '/media/company/roles_assign_old.html')
shutil.copy2('/media/company/output.json', '/media/company/output_old.json')

#-----------------------------------------------------------

#This section creates CSV reports

#-------------------------------------------------------

#GET Users-------------------------------------------

def subprocess_cmd(command):
    process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    print (proc_stdout)

subprocess_cmd("aws iam get-account-authorization-details &gt; /media/company/output.json;aws cloudtrail lookup-events --start-time `date -d 'yesterday' '+%m-%d-%Y'` --end-time  `date -d 'today' '+%m-%d-%Y'`&gt;/media/company/changes.json")

with open('/media/company/output.json') as file:
        data = json.load(file)

with open('/media/company/company_users.csv', 'wt') as file:
        file.write('Users\n')
        writer=csv.writer(file)
        for element in data['UserDetailList']:
         if 'UserName' in element.keys():
            s = element['UserName']
         file.write(s + '\n')

#Get Groups------------------------------------------------------

client = boto3.client('iam')

response = client.list_groups(

)

with open('/media/company/company_groups.csv', 'wt') as file:
        file.write('Groups\n')
        writer=csv.writer(file)
        for element in response['Groups']:
         file.write(element['GroupName'] + '\n')

#Get Users with associated policy--------------------------------------------------------

with open('/media/company/company_users_policies.csv', 'wt') as file:
    file.write('User,Policy\n')
    for element in data['UserDetailList']:
        if 'UserName' in element.keys():
            s = element['UserName']
        for policy in element['AttachedManagedPolicies']:
            c = s + ',' + policy['PolicyName']
            file.write(c + '\n')

#Get Groups with associated policies------------------------------------------------------------------

with open('/media/company/company_group_policies.csv', 'wt') as file:
       file.write('Group,Policies\n')
       for element in data['GroupDetailList']:
         s = element['GroupName']
         for policy in element['AttachedManagedPolicies']:

           file.write(s + "," + policy['PolicyName'] + '\n')

#Roles assigned to policies-----------------------------------------------------------------------------------------------------------------

with open('/media/company/company_role_policies.csv', 'wt') as file:
    file.write('Role,Policy\n')
    for element in data['RoleDetailList']:
     if 'RoleName' in element.keys():
         s= element['RoleName']
     for policy in element['AttachedManagedPolicies']:
         c = s + ',' + policy['PolicyName']
         file.write (c + '\n')

#Get IAM policies-----------------------------------------------------------------------------------------------------------

def get_user_group_service(element):
    s = ''
    for e in element['AssumeRolePolicyDocument']['Statement']:
        p = e['Principal']
        if 'Federated' in p:
            s += p['Federated']
        if 'Service' in p:
            obj = p['Service']
            if  type(obj) is str:
                s += obj  # element is string
            else:
                obj.sort()
                s += ''.join(obj) # element is array of strings

        if 'AWS' in p:
            s += p['AWS']
    return s

def get_policies(element):
    list = []
    if 'PolicyName' in element.keys():
        list.append(element['PolicyName'])
    for policy in element['AttachedManagedPolicies']:
        list.append(policy['PolicyName'])
    if len(element['RolePolicyList']) > 0:
        list.append(element['RolePolicyList'][0]['PolicyName'])
    return '--'.join(list)

def main():
        with open('/media/company/company_role_assignment.csv', 'wt') as file:
           file.write('Role,Policy,User/Group/Service\n')
           for element in data['RoleDetailList']:
              s = element['RoleName'] + ',' + get_policies(element) + ',' + get_user_group_service(element)
              file.write(s + '\n')

main()

##########################################################################################################################################

##Get cloudwatch events (if any)

with open('/media/company/changes.json') as file:
    data = json.load(file)

# Write all CloudTrail events that carry a user name into a single CSV report
with open('/media/company/cloudtrail.csv', 'wt') as file:
    file.write('ResourceType,ResourceName,EventName,UserName\n')
    for element in data['Events']:
        for resource in element['Resources']:
            if 'Username' in element:
                file.write(resource['ResourceType'] + ',' + resource['ResourceName'] + ',' + element['EventName'] + ',' + element['Username'] + '\n')

###################################################################################################

#Check if there are diferences between old and new reports

######################################################################################################

def compare_files(file1, file2, result_file):
    output_string = ""
    # Open both files for reading in text mode (default mode)
    f1 = open(file1)
    f2 = open(file2)
    # Read the first line from each file
    f1_line = f1.readline()
    f2_line = f2.readline()
    line_no = 1
    while f1_line != '' or f2_line != '':
        # Strip the trailing newline before comparing
        f1_line = f1_line.rstrip()
        f2_line = f2_line.rstrip()
        if f1_line != f2_line:
            ########### If the line exists in both files but differs, mark it as changed
            if f1_line != '' and f2_line != '':
                print("Line changed:Line-%d" % line_no + "-" + f1_line)
                output_string += "Line changed:Line-%d" % line_no + "-" + f1_line + "\n"
            ########### If a line exists only in the new file, mark it as added
            elif f1_line != '':
                print("Line added:Line-%d" % line_no + "-" + f1_line)
                output_string += "Line added:Line-%d" % line_no + "-" + f1_line + "\n"
            ########### If a line exists only in the old file, mark it as removed
            elif f2_line != '':
                print("Line removed:Line-%d" % line_no + "-" + f2_line)
                output_string += "Line removed:Line-%d" % line_no + "-" + f2_line + "\n"
        # Read the next line from each file
        f1_line = f1.readline()
        f2_line = f2.readline()
        # Increment line counter
        line_no += 1
    # Close the files and write the collected differences to the result file
    f1.close()
    f2.close()
    f = open(result_file, "w")
    f.write(output_string)
    f.close()

compare_files("/media/company/company_users.csv", "/media/company/company_users_old.csv", "/media/company/company_users_changes.txt")

compare_files("/media/company/company_groups.csv", "/media/company/company_groups_old.csv", "/media/company/company_groups_changes.txt")

compare_files("/media/company/company_users_policies.csv", "/media/company/company_users_policies_old.csv", "/media/company/company_users_policies_changes.txt")

compare_files("/media/company/company_group_policies.csv", "/media/company/company_group_policies_old.csv", "/media/company/company_group_policies_changes.txt")

compare_files("/media/company/company_role_policies.csv", "/media/company/company_role_policies_old.csv", "/media/company/company_role_policies_changes.txt")

compare_files("/media/company/company_role_assignment.csv", "/media/company/company_role_assignment_old.csv", "/media/company/company_role_assignemnt_changes.txt")

############################################################################################

#Look for changes and if any, collect it to changes.txt file

####################################################################################################

files = ["/media/company/company_group_policies_changes.txt", "/media/company/company_groups_changes.txt", "/media/company/company_role_assignemnt_changes.txt", "/media/company/company_role_policies_changes.txt", "/media/company/company_users_changes.txt", "/media/company/company_users_policies_changes.txt"]

with open("/media/company/changes.txt", "w") as file:

  for filename in files:
    with open(filename, "r") as f:
        contents = f.read()
        output = ""
        if contents:
            output = "FileName:" + os.path.basename(filename) + " " + contents
            file.write(str(output + '\n'))

#######################################################################################################

#Create HTML from CSV files

###################################################################################################

def html_from_csv(input_file, output_file):
    # Render a CSV report as an HTML table
    df = pd.read_csv(input_file)
    pd.set_option('display.max_colwidth', -1)
    output = df.to_html(index=False)
    f = open(output_file, 'w')
    f.write(output)
    f.close()


html_from_csv('/media/company/company_group_policies.csv', '/media/company/group_policy.html')

html_from_csv('/media/company/company_groups.csv', '/media/company/groups.html')

html_from_csv('/media/company/company_role_assignment.csv', '/media/company/roles_assign.html')

html_from_csv('/media/company/company_role_policies.csv', '/media/company/roles.html')

html_from_csv('/media/company/company_users_policies.csv', '/media/company/user_policy.html')

html_from_csv('/media/company/company_users.csv', '/media/company/users.html')

#########################################################

#Add caption to HTML files

#################################################################

def html_caption(html_file, caption):
    append_copy = open(html_file, "r")
    original_text = append_copy.read()
    append_copy.close()
    append_copy = open(html_file, "w")
    # prepend a bold caption above the existing table
    append_copy.write("<b>{caption}</b>\n\n".format(caption=caption))
    append_copy.write(original_text)
    append_copy.close()


html_caption('/media/company/groups.html', 'IAM Groups')

html_caption('/media/company/group_policy.html', 'IAM Group policy')

html_caption('/media/company/roles_assign.html', 'IAM Role Assignment')

html_caption('/media/company/roles.html', 'IAM Roles')

html_caption('/media/company/user_policy.html', 'IAM User policies')

################################################################################################

#if changes.txt file is not empty, convert it to HTML

###################################################################################################

with open('/media/company/changes.txt') as friendsfile:
    first = friendsfile.read(1)
    if not first:
        print('no changes')

    else:

        with open('/media/company/changes.txt') as fin, open('/media/company/change.txt', 'w') as fout:
           for line in fin:
               fout.write(line.replace(',', ''))
        contents = open("/media/company/changes.txt","r")
        with open("/media/company/changes.html", "w") as e:
             for lines in contents.readlines():
                 e.write("<pre>" + lines + "</pre>\n")
with open('/media/company/changes.txt') as friendsfile:
    first = friendsfile.read(1)

if not first:
    # changes.txt is empty, send the regular report
    file_list = ['/media/company/no_changes.html', '/media/company/users.html', '/media/company/user_policy.html', '/media/company/groups.html', '/media/company/group_policy.html', '/media/company/roles.html', '/media/company/roles_assign.html']
    data = read_files(file_list)
    header = {'To': html_body_recipient, 'Subject': 'AWS IAM Reports', 'From': 'svc@company.com'}
    send_html_body(header, data)
    time.sleep(10)
else:
    # changes were detected, prepend changes.html to the report
    file_list = ['/media/company/changes.html', '/media/company/users.html', '/media/company/user_policy.html', '/media/company/groups.html', '/media/company/group_policy.html', '/media/company/roles.html', '/media/company/roles_assign.html']
    data = read_files(file_list)
    header = {'To': html_body_recipient, 'Subject': 'AWS IAM Reports', 'From': 'svc@company.com'}
    send_html_body(header, data)
    time.sleep(10)

# Create JIRA ticket if the role assignment report has changes
with open('/media/company/company_role_assignemnt_changes.txt') as friendsfile:
    first = friendsfile.read(1)

if not first:
    print('role assignment report is empty')
else:
    send_email()
    time.sleep(10)

requirements.txt:

requests
python-dateutil
json_tricks
boto3
pymongo
pandas
awscli

Dockerfile

FROM ubuntu:latest
WORKDIR /home
COPY . .

RUN echo "postfix postfix/mailname string rundeck.company.com" | debconf-set-selections && \
    echo "postfix postfix/main_mailer_type string 'Internet Site'" | debconf-set-selections && \
    apt-get update -y && \
    apt-get install postfix sasl2-bin mailutils vim python3-pip -y && \
    pip3 install --no-cache-dir -r requirements.txt && \
    sed -i s/START=no/START=yes/g /etc/default/saslauthd && \
    echo "[smtp.office365.com]:587 svc@company.com:pass" > /etc/postfix/sasl_passwd && \
    echo "/.+/ svc@company.com" > /etc/postfix/sender_canonical && \
    sed -i 's/inet_protocols = all/inet_protocols = ipv4/g' /etc/postfix/main.cf && \
    sed -i 's/relayhost = /relayhost = [smtp.office365.com]:587/g' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_sasl_auth_enable = yes' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_sasl_security_options = noanonymous' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_tls_security_level = may' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a sender_canonical_maps = regexp:/etc/postfix/sender_canonical' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_tls_CAfile = /etc/postfix/cacert.pem' /etc/postfix/main.cf && \
    mv /home/cacert.pem /etc/postfix/ && \
    postmap hash:/etc/postfix/sasl_passwd && \
    postmap hash:/etc/postfix/sender_canonical

ENTRYPOINT ["./start.py"]

Finally, we'll map a host folder to the Docker container so we can keep track of yesterday's reports:

docker run -it -v /home/centos/docker:/media test /bin/bash

Docker – create Postfix container

Posted: February 21, 2019 in docker, Linux

In this post we'll create a Docker image with Postfix installed and configured as an Office 365 relay host; pip3 will also be installed, along with the Python requests module.

The cacert.pem file contains the Microsoft certificates needed for a secure connection. Its content is the output of the following command:

openssl s_client -showcerts -starttls smtp -crlf -connect smtp.office365.com:587
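
That command prints the whole TLS handshake to stdout; to keep only the certificate blocks in cacert.pem, something along these lines can be used (the sed filter and redirection are my addition, not part of the original command):

openssl s_client -showcerts -starttls smtp -crlf -connect smtp.office365.com:587 </dev/null 2>/dev/null | sed -n '/BEGIN CERTIFICATE/,/END CERTIFICATE/p' > cacert.pem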

start.sh is a simple bash script which starts the postfix and saslauthd services and sends a test email. To prevent the Docker container from exiting before the email is sent, I added a sleep command at the end:

#!/bin/bash

service postfix start && service saslauthd start
echo "sending email..."
echo "this is the body" | mail -s "this is the subject" "dvucanovic@example.com"
sleep 20

requirements.txt contains a single entry, requests, which will be installed by the pip3 command in the Dockerfile.

Docker file

FROM ubuntu:latest

WORKDIR /home

COPY . .

RUN echo "postfix postfix/mailname string rundeck.example.com" | debconf-set-selections && \
    echo "postfix postfix/main_mailer_type string 'Internet Site'" | debconf-set-selections && \
    apt-get update -y && \
    apt-get install postfix sasl2-bin mailutils python3-pip -y && \
    pip3 install --no-cache-dir -r requirements.txt && \
    sed -i s/START=no/START=yes/g /etc/default/saslauthd && \
    echo "[smtp.office365.com]:587 svc-user@example.com:password" > /etc/postfix/sasl_passwd && \
    echo "/.+/ svc-user@example.com" > /etc/postfix/sender_canonical && \
    sed -i 's/inet_protocols = all/inet_protocols = ipv4/g' /etc/postfix/main.cf && \
    sed -i 's/relayhost = /relayhost = [smtp.office365.com]:587/g' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_sasl_auth_enable = yes' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_sasl_security_options = noanonymous' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_tls_security_level = may' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a sender_canonical_maps = regexp:/etc/postfix/sender_canonical' /etc/postfix/main.cf && \
    sed -i '/smtpd_use_tls=yes/a smtp_tls_CAfile = /etc/postfix/cacert.pem' /etc/postfix/main.cf && \
    sed -i 's#128#& 172.17.0.0/16#' /etc/postfix/main.cf && \
    mv /home/cacert.pem /etc/postfix/ && \
    postmap hash:/etc/postfix/sasl_passwd && \
    postmap hash:/etc/postfix/sender_canonical

ENTRYPOINT ["./start.sh"]

Put all 3 files in the same directory and run:

docker build . -t some_tag
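
Once the image is built, a quick smoke test is to start the container in the foreground and watch the test email from start.sh go out (some_tag here is just whatever tag was used in the build step above):

docker run --rm -it some_tag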

 

In previous posts we created subtasks using Python and bash; in this one we'll use Python to do the following:

  • Create a task which has a variable in its name
  • The script will search whether that task already exists and check its status (resolved/open)
  • If there is no open task (i.e. no task with a status other than DONE), it will create one
  • It will check whether that task has sub-tasks; if there are none, it will create a sub-task
  • If the task already exists (with a status other than DONE), it will check for sub-tasks and create one if none exist
  • The script uses the client variable as a parameter, which is used to build the task/sub-task name

#!/usr/bin/python
# -*- coding: utf-8 -*-

import sys
import json
import requests
import os
import urllib2
import argparse
from getpass import getpass
from json_tricks import dump,dumps

username = 'user'
password = 'Pass'

client = 'Newbie'

query = "Managed Services On-boarding " + client

def create_subtask(summary, key, line, user, password):

    headers = {"Content-Type": "application/json"}
    data = {"fields": {"project": {"key": key},"parent": {"key": line},"summary": summary,
   "issuetype": {"name": "Sub-task"}}}
    response = requests.post("https://jira.corp.company.com/rest/api/latest/issue/",
    headers=headers,data=json.dumps(data),auth=(user, password))
    out= response.json()
    print out

def jql_query(query, username, password):

    headers = {
   'Content-Type': 'application/json',}
    params = (
    ('jql', 'project="Managed Services" AND summary~"'+query+'" AND issuetype="Task" AND status!="DONE"'),
    )
    response = requests.get('https://jira.corp.company.com/rest/api/2/search', headers=headers, params=params, auth=(username, password))
    data =  response.json()
    return data

data = jql_query(query, username, password)

if data["total"] == 0:

   headers = {"Content-Type": "application/json"}
   data = {"fields":{"labels":["SERVICE_MANAGEMENT"],"reporter":{"name":"user"},"project":{"key":"MS"},"summary":"Managed Services On-boarding {client}".format(**locals()),"description":"Managed service onboarding task for {client} client".format(**locals()),"issuetype":{"name":"Task"}}}
   response = requests.post("https://jira.corp.company.com/rest/api/latest/issue/",
   headers=headers, data=json.dumps(data), auth=(username, password))
   out= response.json()
   print out
   data = jql_query(query, username, password)
   for issue in data['issues']:
      if len(issue['fields']['subtasks']) == 0:
         line = issue['key']
         create_subtask(client + ":CRM – Set up Client kick off meeting.", "MS", line, username, password)

else:
   data = jql_query(query, username, password)
   for issue in data['issues']:
      if len(issue['fields']['subtasks']) == 0:
         line = issue['key']
         create_subtask(client + ":CRM – Set up Client kick off meeting.", "MS", line, username, password)

Monitoring email content using Zabbix

Posted: January 1, 2019 in Linux

In this example we'll use a Python script to extract the job name from the email body, move parsed emails to the "Processed" folder, create a Zabbix item for each job and create an LLD discovery rule:

Put this script under the /usr/lib/zabbix/externalscripts folder.
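
The script has to be readable and executable by the user the Zabbix server runs as (usually zabbix); something like the following should do, assuming the script was saved as email_discovery.py (the file name is my assumption):

chmod +x /usr/lib/zabbix/externalscripts/email_discovery.py
chown zabbix:zabbix /usr/lib/zabbix/externalscripts/email_discovery.py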

#!/usr/bin/python

import email, imaplib, re, sys, json, base64

#read previously encrypted password from file

with open('/opt/an_sys/output.txt', 'r') as myfile:
    data=myfile.read()

#connect to the mailbox, switch to the "ZABBIX" folder and search for emails
#sent from email@domain.com

user = 'monitoring@email.com'
pwd = base64.b64decode(data)

conn = imaplib.IMAP4_SSL("outlook.office365.com")
conn.login(user,pwd)
conn.select("ZABBIX")

#resp, items = conn.uid("search",None, 'All')
resp, items = conn.uid("search" ,None, '(FROM "email@domain.com")')

#f = open('output.txt','w')

#sys.stdout = f

tdata=[]

items = items[0].split()
for emailid in items:
    resp, data = conn.uid("fetch",emailid, "(RFC822)")
    if resp == 'OK':
        email_body = data[0][1].decode('utf-8')
        mail = email.message_from_string(email_body)
        if mail["Subject"].find("failed") > 0:
          #print mail["Subject"]
          regex1=r'Snap:\s*(.+?)\s+failed'
          a=re.findall(regex1 ,mail["Subject"], re.DOTALL)

          #regex2 = r'Job finished'
          #c=re.findall(regex2, email_body, re.IGNORECASE)

          #format string by removing "'","\r\n"," ","|",".","-","__" and "Processor_"

          if a:
           a = [item.replace("'", "") for item in a]
           a = [item.replace("\r\n", "") for item in a]
           a = [item.replace(" ", "_") for item in a]
           a = [item.replace("|", "_") for item in a]
           a = [item.replace(".", "_") for item in a]
           a = [item.replace("-", "") for item in a]
           a = [item.replace("__", "_") for item in a]
           a = [item.replace("Processor_", "") for item in a]
           seen = set()
           result = []

           for item in a:
               # remove "_for_" and everything after it
               c = item.split("_for_")[0]
               # remove digits
               c = ''.join([i for i in c if not i.isdigit()])
               # replace "__" with nothing, limit the string to 36 characters
               # (Zabbix item keys can't be too long) and strip a trailing "_"
               s = c.replace("__", "")
               s = s[:36]
               s = re.sub("_$", "", s)
               if s not in seen:
                   seen.add(s)
                   result.append(s)
                   output = " ".join(result)

                   # create LLD JSON output
                   tdata.append({'{#JOB}': output, '{#NAME}': item})
print json.dumps({"data": tdata}, indent=4)

Discovery rule

If it takes some time for items to be created in Zabbix, try reducing the discovery rule's update interval; if that doesn't help, try decreasing the configuration cache refresh period. The configuration cache contains information on the hosts and items to be monitored and is re-created by default every 60 seconds. This period can be customised with the CacheUpdateFrequency parameter (/etc/zabbix/zabbix_server.conf); try a value between 20 and 60 seconds. If you use a Zabbix proxy, edit ConfigFrequency (/etc/zabbix/zabbix_proxy.conf) instead, then restart the Zabbix service.
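
For example, something like this in zabbix_server.conf (30 seconds is just an illustrative value, adjust to your environment), followed by a service restart:

# /etc/zabbix/zabbix_server.conf
CacheUpdateFrequency=30

systemctl restart zabbix-server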


Item property


Items will be sent by the Zabbix trapper; if an item doesn't yet exist on the Zabbix server, the email is left in the "ZABBIX" folder until the key for that job is created. This script is run every 2 minutes.
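
A minimal crontab entry for that schedule could look like this (the script path and file name are assumptions; the mailbox password is passed with the -p switch the script defines below):

*/2 * * * * /usr/local/bin/python3 /usr/lib/zabbix/externalscripts/zabbix_trapper_mail.py -p 'mailbox_password' >/dev/null 2>&1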

#!/usr/local/bin/python3
from subprocess import run, PIPE
import email
import imaplib
import re
import sys
import logging
import base64
import argparse
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# function to send items to Zabbix server using trapper

def zabbix_sender(key, output):
    """
    Sends a message to the Zabbix monitoring server to update the given key
    with the given output. This is designed to be only called whenever
    the service encounters an error.
    Zabbix should be configured with an Zabbix Trapper
    item for any key passed in, and a trigger for any instance where the output
    value of the message has a string length greater than 0. Since this method
    should only be called when something goes wrong, the Zabbix setup for
    listening for this key should be "any news is bad news"
    @param key
    The item key to use when communicating with Zabbix. This should match a key
    configured on the Zabbix monitoring server.
    @param output
    The data to display in Zabbix notifying TechOps of a problem.
    """
    # When I actually did this at work, I had the server and hostname set in an
    # external configuration file. That's probably how you want to do this as
    # opposed to hard-coding it into the script.
    server = "192.168.10.19"
    hostname = "zabbix_host"
    cmd = ["zabbix_sender", "-z", server, "-s",  hostname,
        "-k", key, "-o",  output]
    result = run(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True, check=True)
    return result.stdout, result.stderr
# the mailbox password is passed in as a command-line argument

# log in to mailbox

parser = argparse.ArgumentParser()
parser.add_argument('-p', '-password', dest='pwd', help='The password for authentication.')
args = parser.parse_args()

user = 'monitoring@domain.com'
pwd = args.pwd

conn = imaplib.IMAP4_SSL("outlook.office365.com")
conn.login(user, pwd)

conn.select("ZABBIX")

# resp, items = conn.uid("search",None, 'All')
resp, items = conn.uid("search", None, '(FROM "email1@domain.com")')
items = items[0].split()
for emailid in items:
    resp, data = conn.uid("fetch", emailid, "(RFC822)")
    if resp == 'OK':
        email_body = data[0][1].decode('utf-8')
        mail = email.message_from_string(email_body)
        # search for emails with the word "failed" in the subject
        if mail["Subject"].find("failed") > 0:
         # get the job name from the subject, that string will be used as the Zabbix item
         regex1 = r'Snap:\s*(.+?)\s+failed'
         a = re.findall(regex1, mail["Subject"], re.DOTALL)
         # format the job name (remove "'", "\r\n", "|", ".", "-", "__" and "Processor_")
         if a:
           a = [item.replace("'", "") for item in a]
           a = [item.replace("\r\n", "") for item in a]
           a = [item.replace(" ", "_") for item in a]
           a = [item.replace("|", "_") for item in a]
           a = [item.replace(".", "_") for item in a]
           a = [item.replace("-", "") for item in a]
           a = [item.replace("__", "_") for item in a]
           a = [item.replace("Processor_", "") for item in a]
           seen = set()
           result = []

           for item in a:
               # remove everything after "_for_" (including "_for_")
               c = item.split("_for_")[0]
               # remove digits from the string
               c = ''.join([i for i in c if not i.isdigit()])
               # replace "__" with nothing and limit the string to 36 characters
               # (a Zabbix item key can't be too long)
               s = c.replace("__", "")
               s = s[:36]
               # if the string ends with "_", remove it (an item key can't end with special characters)
               s = re.sub("_$", "", s)
               # put all unique strings in a list
               if s not in seen:
                   seen.add(s)
                   result.append(s)
                   out = " ".join(result)
                   # create the Zabbix key from the string (email subject)
                   key = "an.snap[" + out + ",an]"
                   # send the value "failed" to Zabbix
                   try:
                       r = zabbix_sender(key, "failed")
                       k = "".join(r)
                       if k.find("failed: 0") > 0:
                           # copy the email from the "ZABBIX" folder to the "Processed" folder
                           result = conn.uid('COPY', emailid, "Processed")
                           if result[0] == 'OK':
                               # clear the "ZABBIX" folder
                               mov, data = conn.uid('STORE', emailid, '+FLAGS', '(\Deleted Items)')
                               # conn.expunge()
                   except:
                       continue
        else:
         # if the mail subject doesn't contain the word "failed", move it to the "Ignored" folder
         result = conn.uid('COPY', emailid, "Ignored")
         # print(result)
         if result[0] == 'OK':
             # clean up the "ZABBIX" folder
             mov, data = conn.uid('STORE', emailid, '+FLAGS', '(\Deleted Items)')
             # conn.expunge()

#Disconnect from mailbox
conn.close()
conn.logout()

This script will search for any email whose subject starts with "Darktrace"; if it finds one, it extracts the email body in human-readable format and moves the message to the Processed folder. All other emails are moved to the backup folder, and processed emails are deleted from the Inbox to mark them as handled.

#!/usr/bin/python3

from email.message import EmailMessage
import email
import imaplib
import re
import sys
import logging
import base64
import email.parser
import html2text
import requests
import json
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-mpass', '-mailbox_password', dest = 'mailbox_password', help = 'mailbox password.')
args = parser.parse_args()

user = 'someuser@company.com'
mailbox_password = args.mailbox_password

def get_email_body(body):

       if body.is_multipart():
         print('To:\t\t', body['To'])
         print('From:\t', body['From'])
         print('Subject:', body['Subject'])
         print('Date:\t', body['Date'])
         output = ""
         # take the last text/plain part that is not an attachment
         for part in body.walk():
           if (part.get_content_type() == 'text/plain') and (part.get('Content-Disposition') is None):
            output = part.get_payload()
         return output
       else:
         print('To:\t\t', body['To'])
         print('From:\t', body['From'])
         print('Subject:', body['Subject'])
         print('Date:\t', body['Date'])
         print('Thread-Index:\t', body['Thread-Index'])
         text = f"{body.get_payload(decode=True)}"
         html = text.replace("b'", "")
         h = html2text.HTML2Text()
         h.ignore_links = True
         output = (h.handle(f'''{html}''').replace("\\r\\n", ""))
         output = output.replace("'", "")
         # output in one line
         #output = output.replace('\n'," ")
         output = output.replace('*', "")
         return output

def clear_inbox(conn, dest_folder):
    output=[]
    result = conn.uid('COPY', emailid, dest_folder)
    output.append(result)
    if result[0] == 'OK':
     mov, data = conn.uid('STORE', emailid, '+FLAGS', '(\Deleted Items)')
     conn.expunge()

conn = imaplib.IMAP4_SSL("outlook.office365.com")
conn.login(user,mailbox_password)
conn.select("Inbox")

try:

  resp, items = conn.uid("search",None, 'All')
  items = items[0].split()

  for emailid in items:
   resp, data = conn.uid("fetch",emailid, "(RFC822)")
   if resp == 'OK':
     email_body = data[0][1].decode('utf-8')
     email_message = email.message_from_string(email_body)
     subject = email_message["Subject"]
     if subject.lower().startswith('Darktrace'.lower()):
         output = get_email_body(email_message)

         #do some task
         # move emails to Processed folder and clear Inbox
         clear_inbox(conn, "Processed")
     else:
        clear_inbox(conn, "backup")

except IndexError:
     print("No new email")

conn.close()
conn.logout()

The second method uses the BeautifulSoup HTML parser:

from bs4 import BeautifulSoup

def print_payload(message):
    print('')
    if message.is_multipart():
        for payload in message.get_payload():
            print_payload(payload)
    else:
         #print (message.get_payload())
         for part in message.walk():
             if part.get_content_type():
                 body = str(part.get_payload())
                 soup = BeautifulSoup(body,features="html.parser")
                 paragraphs = soup.find_all('p')
                 for paragraph in paragraphs:
                     print(paragraph.text.encode('utf-8').decode('ascii', 'ignore'))

#............................

mail = email.message_from_string(email_body)
print_payload(mail)