Archive for the ‘Amazon Web Services (AWS)’ Category

Changes compared with previous version:

  • Script will search all regions
  • added the time when the instance was launched
  • added an option to terminate all EBS volumes associated with the instance
  • uses Simple Email Service (SES)
  • uses Lambda environment variables (set as shown below)
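
The script reads its settings from Lambda environment variables (AWS_Account_Name, EMAIL_HOST, EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, SENT_TO). A minimal sketch of setting them from the AWS CLI, assuming the function is named my-ec2-cleanup; the SES SMTP endpoint and credentials are placeholders:

aws lambda update-function-configuration \
  --function-name my-ec2-cleanup \
  --environment "Variables={AWS_Account_Name=MyAccount,EMAIL_HOST=email-smtp.eu-west-1.amazonaws.com,EMAIL_HOST_USER=SMTP_USER,EMAIL_HOST_PASSWORD=SMTP_PASS,SENT_TO=admin@example.com}"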

6-1.png

import smtplib
import boto3
import os
from datetime import datetime
from dateutil.relativedelta import relativedelta
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

caller_identity = boto3.client('sts').get_caller_identity()
AWSAccountID = caller_identity['Account']
AWSUser = caller_identity['UserId']
AWSAccount = os.environ["AWS_Account_Name"]
# create date variables
date_after_month = datetime.now() + relativedelta(days = 7)
# date_after_month.strftime('%d/%m/%Y')
today = datetime.now().strftime('%d/%m/%Y')

# AWS SES variables
EMAIL_HOST = os.environ["EMAIL_HOST"]
EMAIL_HOST_USER = os.environ["EMAIL_HOST_USER"] # Replace with your SMTP username
EMAIL_HOST_PASSWORD = os.environ["EMAIL_HOST_PASSWORD"] # Replace with your SMTP password
SENT_TO = os.environ["SENT_TO"]
EMAIL_PORT = 587

def lambda_handler(event, context):
    instance_ids = []
    ec = boto3.client('ec2')
    ec2_regions = [region['RegionName'] for region in ec.describe_regions()['Regions']]

    for region in ec2_regions:
        ec = boto3.client('ec2', region_name=region)
        ec2 = boto3.resource('ec2', region_name=region)
        reservations = ec.describe_instances().get('Reservations', [])

        for reservation in reservations:
            for instance in reservation['Instances']:
                tags = {}
                name = ""
                launch_date = ""
                for tag in instance.get('Tags', []):
                    tags[tag['Key']] = tag['Value']
                    if tag['Key'] == 'Name':
                        name = tag['Value']
                if 'Owner' not in tags or tags['Owner'].lower() == 'unknown':
                    instance_ids.append(instance['InstanceId'])

                    # Check if "TerminateOn" tag exists
                    if 'TerminateOn' in tags:
                        # Compare the TerminateOn value with the current date
                        if tags['TerminateOn'] == today:
                            # Check if termination protection is enabled
                            terminate_protection = ec.describe_instance_attribute(InstanceId=instance['InstanceId'], Attribute='disableApiTermination')
                            protection_value = terminate_protection['DisableApiTermination']['Value']
                            # If it is enabled, disable it
                            if protection_value:
                                ec.modify_instance_attribute(InstanceId=instance['InstanceId'], DisableApiTermination={'Value': False})

                            # Mark all attached EBS volumes to be deleted on termination
                            volumes_to_delete = ec.describe_instance_attribute(InstanceId=instance['InstanceId'], Attribute='blockDeviceMapping')
                            for v in volumes_to_delete['BlockDeviceMappings']:
                                launch_date = str(v['Ebs']['AttachTime'])
                                device_name = v['DeviceName']
                                ec.modify_instance_attribute(InstanceId=instance['InstanceId'], BlockDeviceMappings=[{'DeviceName': device_name, 'Ebs': {'DeleteOnTermination': True}}])

                            # Send email that the instance is terminated
                            body = "<b>AWS Account:</b>" + AWSAccount + " <b>AWS Account Number:</b>" + AWSAccountID + " <b>Instance Name:</b>" + name + " <b>Instance ID:</b>" + instance['InstanceId'] + " <b>Created At:</b>" + launch_date + " <b>To be terminated at:</b> Now <b>Note:</b> Owner tag is missing from this instance, hence the instance is removed."
                            msg = MIMEMultipart('alternative')
                            msg['Subject'] = "Notification of terminated instances in " + region + " AWS region"
                            msg['From'] = "ses@amazon.com"
                            msg['To'] = SENT_TO
                            msg.attach(MIMEText(body, 'html'))
                            s = smtplib.SMTP(EMAIL_HOST, EMAIL_PORT)
                            s.starttls()
                            s.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)
                            s.sendmail("ses@amazon.com", SENT_TO, msg.as_string())
                            s.quit()
                            # Terminate the instance
                            ec.terminate_instances(InstanceIds=[instance['InstanceId']])

                        else:
                            # TerminateOn is in the future: notify the owner and stop the instance
                            now = datetime.now()
                            TerminateOn = datetime.strptime(tags['TerminateOn'], '%d/%m/%Y')
                            days = (TerminateOn - now).days
                            volume1 = ec.describe_instance_attribute(InstanceId=instance['InstanceId'], Attribute='blockDeviceMapping')
                            for a in volume1['BlockDeviceMappings']:
                                launch_date = str(a['Ebs']['AttachTime'])
                            body = "<b>AWS Account:</b>" + AWSAccount + " <b>AWS Account Number:</b>" + AWSAccountID + " <b>Instance Name:</b>" + name + " <b>Instance ID:</b>" + instance['InstanceId'] + " <b>Created At:</b>" + launch_date + " <b>Note:</b> Owner tag is missing from this instance, hence the instance will be removed in " + str(days) + " days."
                            msg = MIMEMultipart('alternative')
                            msg['Subject'] = "Notification of shutting down instances in " + region + " AWS region"
                            msg['From'] = "ses@amazon.com"
                            msg['To'] = SENT_TO
                            msg.attach(MIMEText(body, 'html'))
                            s = smtplib.SMTP(EMAIL_HOST, EMAIL_PORT)
                            s.starttls()
                            s.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)
                            s.sendmail("ses@amazon.com", SENT_TO, msg.as_string())
                            s.quit()
                            # Stop the instance
                            ec.stop_instances(InstanceIds=[instance['InstanceId']])

                    else:
                        # No TerminateOn tag: create it, notify the owner and stop the instance
                        ec2.create_tags(Resources=[instance['InstanceId']], Tags=[{'Key': 'TerminateOn', 'Value': date_after_month.strftime('%d/%m/%Y')}])
                        volume = ec.describe_instance_attribute(InstanceId=instance['InstanceId'], Attribute='blockDeviceMapping')
                        for bdm in volume['BlockDeviceMappings']:
                            launch_date = str(bdm['Ebs']['AttachTime'])

                        body = "<b>AWS Account:</b>" + AWSAccount + " <b>AWS Account Number:</b>" + AWSAccountID + " <b>Instance Name:</b>" + name + " <b>Instance ID:</b>" + instance['InstanceId'] + " <b>Created At:</b>" + launch_date + " <b>To be terminated at:</b> " + date_after_month.strftime('%d/%m/%Y') + " (7 days from now) <b>Note:</b> Owner tag is missing from this instance. If you do not wish this instance to be removed, please update the Owner tag."
                        msg = MIMEMultipart('alternative')
                        msg['Subject'] = "Notification of shutting down instances in " + region + " AWS region"
                        msg['From'] = "ses@amazon.com"
                        msg['To'] = SENT_TO
                        msg.attach(MIMEText(body, 'html'))
                        s = smtplib.SMTP(EMAIL_HOST, EMAIL_PORT)
                        s.starttls()
                        s.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)
                        s.sendmail("ses@amazon.com", SENT_TO, msg.as_string())
                        s.quit()
                        # Stop the instance
                        ec.stop_instances(InstanceIds=[instance['InstanceId']])
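
The handler is meant to run on a schedule. One possible way, sketched with the AWS CLI (function name, region and account number are placeholders), is a CloudWatch Events rule that invokes it once a day:

aws events put-rule --name ec2-owner-check --schedule-expression "rate(1 day)"
aws lambda add-permission --function-name my-ec2-cleanup --statement-id ec2-owner-check \
  --action lambda:InvokeFunction --principal events.amazonaws.com \
  --source-arn arn:aws:events:eu-west-1:123456789012:rule/ec2-owner-check
aws events put-targets --rule ec2-owner-check \
  --targets "Id"="1","Arn"="arn:aws:lambda:eu-west-1:123456789012:function:my-ec2-cleanup"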

In the last post we configured a site-to-site VPN between StrongSwan and the AWS VPC Gateway using static routing. In this one we'll use BGP.

1-0.PNG

I’ll be creating a Site-to-Site VPN between 2 AWS regions. Although we would usually take advantage of VPC peering, for demonstration purposes I used an EC2 instance (CentOS 7) with public IP 3.120.227.213 and internal IP 172.31.36.231. The AWS VPN gateway creates 2 tunnels, with public IPs 34.246.169.212 and 52.49.220.63.

Creating AWS VPN Gateway

From AWS VPC console click Customer gateway-New Customer Gateway

1.png

Specify the IP address of the StrongSwan server (3.120.227.213) and BGP ASN (65600)

We’ll create BGP on StrongSwan server later on.

2.PNG

Create Virtual Private Gateway

3.PNG

4.PNG

Attach the Virtual Private Gateway to the VPC: select Virtual Private Gateway – Actions – Attach to VPC – select the VPC and click Yes, Attach

5.png

Create VPN Connection-click Site-to-Site VPN Connection-create VPN connection

6.PNG

Select the Virtual Private Gateway, Customer Gateway, Routing option: Dynamic

7.PNG

Download VPN Gateway configuration

8.png

9.PNG

Install quagga (BGP routing daemon) on the StrongSwan server

sed -i 's/enforcing/disabled/g' /etc/selinux/config
setenforce 0
yum install iptables-services
systemctl enable iptables
systemctl start iptables
yum install quagga
chmod -R 777 /etc/quagga/
systemctl enable zebra
systemctl start zebra
systemctl start bgpd
systemctl enable bgpd
cp /usr/share/doc/quagga-*/bgpd.conf.sample /etc/quagga/bgpd.conf

Configuring BGP

vtysh
config t
router bgp 65600
network 172.31.36.0/24
neighbor 169.254.20.181 remote-as 64512
neighbor 169.254.21.193 remote-as 64512
# if you get "BGP is already running; AS is 7675", remove the sample BGP instance first:
no router bgp 7675
do write
exit
exit
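
To verify that the BGP sessions to both tunnel endpoints establish and that routes are exchanged (they will only come up once the IPsec tunnels configured below are up), the standard Quagga show commands can be used, for example:

vtysh -c 'show ip bgp summary'
vtysh -c 'show ip bgp neighbors'
vtysh -c 'show ip route bgp'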

AWS BGP ASN is 64512

10.PNG

Neighbors are defined in the AWS Virtual Private Gateway configuration file

For tunnel 1:
11.PNG

And tunnel 2:

0.PNG

Install StrongSwan

/etc/strongswan/ipsec.conf:

conn %default
  # Authentication Method : Pre-Shared Key
  #authby=psk
  leftauth=psk
  rightauth=psk
  # Encryption Algorithm : aes-128-cbc
  # Authentication Algorithm : sha1
  # Perfect Forward Secrecy : Diffie-Hellman Group 2
  ike=aes128-sha1-modp1024!
  #ike=aes256-sha256-modp2048s256,aes128-sha1-modp1024!
  # Lifetime : 28800 seconds
  ikelifetime=28800s
  # Phase 1 Negotiation Mode : main
  aggressive=no
  # Protocol : esp
  # Encryption Algorithm : aes-128-cbc
  # Authentication Algorithm : hmac-sha1-96
  # Perfect Forward Secrecy : Diffie-Hellman Group 2
  #esp=aes128-sha256-modp2048s256,aes128-sha1-modp1024!
  esp=aes128-sha1-modp1024!
  # Lifetime : 3600 seconds
  lifetime=3600s
  # Mode : tunnel
  type=tunnel
  # DPD Interval : 10
  dpddelay=10s
  # DPD Retries : 3
  dpdtimeout=30s
  # Tuning Parameters for AWS Virtual Private Gateway:
  keyexchange=ikev1
  #keyingtries=%forever
  rekey=yes
  reauth=no
  dpdaction=restart
  closeaction=restart
  #left=%defaultroute
  leftsubnet=0.0.0.0/0,::/0
  rightsubnet=0.0.0.0/0,::/0
  leftupdown=/etc/strongswan/ipsec-vti.sh
  installpolicy=yes
  compress=no
  mobike=no

conn AWS-VPC-GW1
  # Customer Gateway:
  left=172.31.36.231
  leftid=3.120.227.213
  # Virtual Private Gateway:
  right=34.246.169.212
  rightid=34.246.169.212
  auto=start
  mark=100
  #reqid=1

conn AWS-VPC-GW2
  # Customer Gateway:
  left=172.31.36.231
  leftid=3.120.227.213
  #leftsubnet=172.31.36.0/24
  # Virtual Private Gateway:
  right=52.49.220.63
  rightid=52.49.220.63
  #rightsubnet=172.31.16.0/24
  auto=start
  mark=200

Tunnel 1 virtual customer/private gateway:

11.PNG

Tunnel 2 virtual customer/private gateway:

12.PNG

Public IP of AWS VPN Gateway tunnel

11-a.PNG

/etc/strongswan/ipsec-vti.sh:

#!/bin/bash
IP=$(which ip)
IPTABLES=$(which iptables)

PLUTO_MARK_OUT_ARR=(${PLUTO_MARK_OUT//// })
PLUTO_MARK_IN_ARR=(${PLUTO_MARK_IN//// })
case "$PLUTO_CONNECTION" in
AWS-VPC-GW1)
VTI_INTERFACE=vti1
VTI_LOCALADDR=169.254.20.182/30
VTI_REMOTEADDR=169.254.20.181/30
;;
AWS-VPC-GW2)
VTI_INTERFACE=vti2
VTI_LOCALADDR=169.254.21.194/30
VTI_REMOTEADDR=169.254.21.193/30
;;
esac

case "${PLUTO_VERB}" in
up-client)
#$IP tunnel add ${VTI_INTERFACE} mode vti local ${PLUTO_ME} remote ${PLUTO_PEER} okey ${PLUTO_MARK_OUT_ARR[0]} ikey ${PLUTO_MARK_IN_ARR[0]}
$IP link add ${VTI_INTERFACE} type vti local ${PLUTO_ME} remote ${PLUTO_PEER} okey ${PLUTO_MARK_OUT_ARR[0]} ikey ${PLUTO_MARK_IN_ARR[0]}
sysctl -w net.ipv4.conf.${VTI_INTERFACE}.disable_policy=1
sysctl -w net.ipv4.conf.${VTI_INTERFACE}.rp_filter=2 || sysctl -w net.ipv4.conf.${VTI_INTERFACE}.rp_filter=0
$IP addr add ${VTI_LOCALADDR} remote ${VTI_REMOTEADDR} dev ${VTI_INTERFACE}
$IP link set ${VTI_INTERFACE} up mtu 1436
$IPTABLES -t mangle -I FORWARD -o ${VTI_INTERFACE} -p tcp -m tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu
$IPTABLES -t mangle -I INPUT -p esp -s ${PLUTO_PEER} -d ${PLUTO_ME} -j MARK --set-xmark ${PLUTO_MARK_IN}
$IP route flush table 220
#/etc/init.d/bgpd reload || /etc/init.d/quagga force-reload bgpd
;;
down-client)
#$IP tunnel del ${VTI_INTERFACE}
$IP link del ${VTI_INTERFACE}
$IPTABLES -t mangle -D FORWARD -o ${VTI_INTERFACE} -p tcp -m tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu
$IPTABLES -t mangle -D INPUT -p esp -s ${PLUTO_PEER} -d ${PLUTO_ME} -j MARK --set-xmark ${PLUTO_MARK_IN}
;;
esac

# Enable IPv4 forwarding
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv4.conf.eth0.disable_xfrm=1
sysctl -w net.ipv4.conf.eth0.disable_policy=1

/etc/strongswan/ipsec.secrets:

3.120.227.213 34.246.169.212 : PSK "yZ1oMi60GNgzgXmBSHo84w0M_uYMFL5R"
3.120.227.213 52.49.220.63 : PSK "RDihsBvmWrJ1PbI0HwJ7vMJW24qVJKbx"

If all is fine, both tunnels should be UP
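
Tunnel and VTI interface status can be checked on the StrongSwan side, for example:

strongswan status
strongswan statusall AWS-VPC-GW1
ip addr show vti1
ip addr show vti2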

15

 

A virtual private gateway is the VPN concentrator on the Amazon side of the Site-to-Site VPN connection.

A customer gateway is a physical device or software application on your side of the Site-to-Site VPN connection.

From AWS console click VPC-Virtual Private Gateways-Create Virtual Private Gateway

1.png

2.PNG

Now Create Customer Gateway: Customer Gateway-Create Customer Gateway

3.png

Routing: Static – enter the public IP of the StrongSwan server

4.PNG

Now click Site-to-Site-VPN Connection-Create VPN Connection

5.png

Now select the Virtual Private Gateway and Customer Gateway we created previously, choose Routing Option: Static, specify the remote (StrongSwan side) subnet as a static IP prefix and click Create VPN Connection

6.PNG

Click again Virtual Private Gateways – Actions – Attach to VPC – select the VPC and click Yes, Attach

7.PNG

Allow inbound traffic from StrongSwan server

From Services-VPC-Security Groups-Select Security Group-Inbound Rules-Edit Rule

8.PNG

Add Rule-Type:All traffic-Source StrongSwan IP address

11.PNG

Installing StrongSwan on CentOS 7

If StrongSwan is installed on an AWS EC2 instance, disable the source/destination check.
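
The check can be disabled from the console (Actions – Networking – Change source/destination check) or, as a sketch, with the AWS CLI (the instance ID is a placeholder):

aws ec2 modify-instance-attribute --instance-id i-0123456789abcdef0 --no-source-dest-check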

Ensure that /etc/sysctl.conf contains the following lines and then force them to be loaded by running sysctl -p /etc/sysctl.conf or by rebooting:

net.ipv4.ip_forward = 1
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
net.ipv4.tcp_max_syn_backlog = 1280
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.all.log_martians = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.default.secure_redirects = 0
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.icmp_ignore_bogus_error_responses = 1
net.ipv4.tcp_syncookies = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.tcp_mtu_probing = 1

yum install epel-release
yum repolist
yum update
yum install strongswan
systemctl enable strongswan
yum install ntp
systemctl enable ntpd

Replace the server configuration entries in /etc/ntp.conf so the AWS recommended NTP server pool is used:

server 0.amazon.pool.ntp.org iburst
server 1.amazon.pool.ntp.org iburst
server 2.amazon.pool.ntp.org iburst
server 3.amazon.pool.ntp.org iburst

Switch back to the AWS console – Site-to-Site VPN Connections – select the VPN connection – click Download Configuration

12.PNG

13.PNG

For tunnel 1 the downloaded configuration looks like this:

– IKE version : IKEv1
– Authentication Method : Pre-Shared Key
– Pre-Shared Key : aqke
– Authentication Algorithm : sha1
– Encryption Algorithm : aes-128-cbc
– Lifetime : 28800 seconds
– Phase 1 Negotiation Mode : main
– Diffie-Hellman : Group 2

#2: IPSec Configuration

Configure the IPSec SA as follows:
Category “VPN” connections in the GovCloud region have a minimum requirement of AES128, SHA2, and DH Group 14.
Please note, you may use these additionally supported IPSec parameters for encryption like AES256 and other DH groups like 2, 5, 14-18, 22, 23, and 24.
Higher parameters are only available for VPNs of category “VPN,” and not for “VPN-Classic”.
– Protocol : esp
– Authentication Algorithm : hmac-sha1-96
– Encryption Algorithm : aes-128-cbc
– Lifetime : 3600 seconds
– Mode : tunnel
– Perfect Forward Secrecy : Diffie-Hellman Group 2

/etc/strongswan/ipsec.conf:
conn %default
  mobike=no
  compress=no
  authby=psk
  keyexchange=ikev1
  ike=aes128-sha1-modp1024!
  ikelifetime=28800s
  esp=aes128-sha1-modp1024!
  lifetime=3600s
  rekeymargin=3m
  keyingtries=3
  installpolicy=yes
  dpdaction=restart
  type=tunnel

conn dc-aws1
  leftsubnet=172.16.40.0/24 # local subnet
  right=1.2.3.4 # AWS Gateway public IP
  rightsubnet=10.34.0.0/16 # remote subnet
  auto=start

Store the pre-shared key in /etc/strongswan/ipsec.secrets:

1.2.3.4 : PSK "aqke"

Restart the strongswan service and check the logs:

tail -f /var/log/messages | grep charon

If all is fine, the tunnel should be UP
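
The tunnel state can also be verified from the AWS side, for example with a quick AWS CLI query (a sketch):

aws ec2 describe-vpn-connections \
  --query 'VpnConnections[].VgwTelemetry[].[OutsideIpAddress,Status,StatusMessage]' --output table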

10

In this post we configured Windows instances to hibernate. In this one we'll modify the Node.js script to shut down EC2 instances if no one is connected via SSH.

Setting the SSH idle timeout

cat /etc/profile.d/ssh-timeout.sh
export TMOUT=900
readonly TMOUT

Reboot

 

Installing SSM Agent on CentOS 7

yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
systemctl status amazon-ssm-agent
systemctl enable amazon-ssm-agent
systemctl start amazon-ssm-agent
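
Once the agent is running and the SSM instance role described below is attached, the instance should appear as managed; a quick check from the AWS CLI (a sketch):

aws ssm describe-instance-information \
  --query 'InstanceInformationList[].[InstanceId,PingStatus,PlatformName]' --output table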

For how to create an SSM role and assign it to an instance, check out here and here.

Create the following tags (a CLI example follows the screenshot):

 

Capture
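
The script filters on the tag keys Auto_Stop_Schedule, Auto_Stop_Enabled and Auto_Stop_Type (see the Filters section in the code below). A sketch of tagging an instance from the CLI, with a placeholder instance ID and example values:

aws ec2 create-tags --resources i-0123456789abcdef0 --tags \
  Key=Auto_Stop_Schedule,Value=1 Key=Auto_Stop_Enabled,Value=true Key=Auto_Stop_Type,Value=Linux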

 

Change only the code in auto_stop/modules/control/index.js

// Import Dependencies
let AWS = require('aws-sdk');
AWS.config.region = "eu-west-1";

module.exports.getInstanceIds = () => {
  return new Promise(
    (resolve, reject) => {
      let ec2 = new AWS.EC2();
      let params = {
        Filters: [
          {
            Name: "instance-state-name",
            Values: ["running"]
          },
          {
            Name: "tag:Auto_Stop_Schedule",
            Values: ["1"]
          },
          {
            Name: "tag:Auto_Stop_Enabled",
            Values: ["True", "true", "Yes", "yes"]
          },
          {
            Name: "tag:Auto_Stop_Type",
            Values: ["Linux", "linux"]
          }
        ]
      };

      ec2.describeInstances(params, (err, data) => {
        if (err) reject(err);

        let instanceIds = [];
        let reservations = "";
        try {
          reservations = data.Reservations;
        }
        catch (err) {
          reject(err);
        }
        if (Array.isArray(reservations)) {
          reservations.forEach((reservation) => {
            reservation.Instances.forEach((instance) => {
              instanceIds.push(instance.InstanceId);
            });
          });
          if (instanceIds.length >= 1) {
            resolve({ "InstanceIds": instanceIds });
          }
          else {
            console.log("[Info] getInstanceIds: No instances found.");
            resolve();
          }
        }
        else {
          reject(new Error("[Error] getInstanceIds: Reservations is not an array."));
        }
      });
    }
  );
};

module.exports.shutdownInstances = (controlObj) => {
  return new Promise(
    (resolve, reject) => {
      let ssm = new AWS.SSM();

      let instanceIds = controlObj.InstanceIds;
      instanceIds.forEach((i) => {
        let ssmParams = {
          InstanceIds: [i],
          DocumentName: "AWS-RunShellScript",
          Parameters: {
            "workingDirectory":[""],
            "executionTimeout":["300"],
            "commands":["#!/bin/bash","LOGFOLDER=\"/var/log/ssh_check\"","LOGFILE=\"auto_stop_activity.log\"","","# Check if the ssh_check Log dir exists. If not, create it.","[ -d $LOGFOLDER ] || mkdir -p $LOGFOLDER","","if [[ \"$(/usr/bin/w | wc -l)\" -gt 2 ]];"," then"," echo \"$(date) >>> Live SSH session detected\" "," echo \"$(date) >>> Live SSH session detected\" >> \"$LOGFOLDER/$LOGFILE\""," else"," # If no active SSH sessions are found, shutdown the instance."," echo \"$(date) >>> No running sessions detected\" "," echo \"$(date) >>> Shutting down...\"",""," echo \"$(date) >>> No running sessions detected\" >> \"$LOGFOLDER/$LOGFILE\" "," echo \"$(date) >>> Shutting down...\" >> \"$LOGFOLDER/$LOGFILE\" "," shutdown -P -t 30 > /dev/null 2>&1"," exit 0"," fi"]
          },
          MaxErrors: "0",
          TimeoutSeconds: 120
        };

        // Send the shell script via SSM; it powers the instance off when no active SSH session is found
        ssm.sendCommand(ssmParams, function(err, data) {
          if (err) {
            console.log(`Error: ${err}`);
          }
          console.log("Command Sent");
          console.log(`Instance to shutdown: ${instanceIds}`);
        });
      });
    }
  );
};

 

OpsWorks for Puppet Enterprise runs a Puppet Enterprise server in AWS.

In the AWS console select OpsWorks – Go to OpsWorks for Puppet Enterprise

 

5

 

Create Puppet Enterprise Server

 

6.PNG

 

Specify the server name, region and instance size

 

7.PNG

 

Specify the EC2 key pair, Puppet Git control repository and private key.

For directions on how to create a GitHub SSH connection, see this post.

 

8.PNG

 

Specify the VPC, subnet, security group and system maintenance time, and choose whether you want to enable automatic backups of the AWS Puppet instance

 

9.png

 

During installation, download the credentials and the starter kit. The credentials will be used to authenticate to Puppet when accessing it via the web console

 

10.PNG

Web access:

https://<puppet instance IP>

Use the credentials downloaded in the previous step.

 

11

 

Installing Windows agent

First allow unauthenticated CA (to solve the “access denied” issue when sending certificate signing requests).
In the Puppet console click Classification – expand PE Infrastructure – PE Master

 

2

 

 

Click on Configuration; under Class: puppet_enterprise::profile::master add allow_unauthenticated_ca and set it to true

 

3.png

 

The Windows agent is located on the Puppet Enterprise server at /opt/puppetlabs/server/data/packages/public/<puppet version>/windows-x86_64-<puppet version>/puppet-agent-x64.msi

Transfer that file to the Windows node.

Open CMD as administrator and run

 

puppet-agent-x64.msi /qn PUPPET_MASTER_SERVER=my-puppet.opsworks-cm.io PUPPET_AGENT_CERTNAME=wind.example.com
Go to the Puppet Enterprise console – Unsigned certs – and sign the certificate
1
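
After the certificate is signed, a first run can be triggered on the Windows node from an elevated command prompt (an optional quick check):

puppet agent -t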

The next procedure will enable monitoring of the AWS status RSS feeds (https://status.aws.amazon.com/).

I modified this template and added troubleshooting steps for issues I experienced.

All files can be downloaded from here.

Copy the AWS_Service_Health_Dashboard.py script to /usr/lib/zabbix/externalscripts

pip install feedparser
pip install python-dateutil

# The system time zone needs to be changed, otherwise the following warnings occur:
# /usr/lib/python2.7/site-packages/dateutil/parser/_parser.py:1204: UnknownTimezoneWarning: tzname PDT identified but not understood.
#   Pass `tzinfos` argument in order to correctly return a timezone-aware datetime. In a future version, this will raise an exception.
#   category=UnknownTimezoneWarning)
# /usr/lib/python2.7/site-packages/dateutil/parser/_parser.py:1204: UnknownTimezoneWarning: tzname PST identified but not understood.
#   Pass `tzinfos` argument in order to correctly return a timezone-aware datetime. In a future version, this will raise an exception.
#   category=UnknownTimezoneWarning)

# set the time zone to avoid the above warnings
timedatectl set-timezone America/Chicago

chmod +x AWS_Service_Health_Dashboard.py
chown zabbix:zabbix AWS_Service_Health_Dashboard.py

./AWS_Service_Health_Dashboard.py "-i" "3600" "-b" NA '-m' "TRUE"
#Output:
 
{"response":"success","info":"processed: 0; failed: 0; total: 0; seconds spent: 0.000005"}
{"response":"success","info":"processed: 0; failed: 0; total: 0; seconds spent: 0.000004"}
{"response":"success","info":"processed: 0; failed: 0; total: 0; seconds spent: 0.000005"}
{"response":"success","info":"processed: 0; failed: 0; total: 0; seconds spent: 0.000005"}
{"response":"success","info":"processed: 0; failed: 0; total: 0; seconds spent: 0.000004"}
{"response":"success","info":"processed: 0; failed: 0; total: 0; seconds spent: 0.000004"}

Create 4 hosts (one for each region): NA, SA, EU and AP

Set the visible name to be the same as the host name and add the host to the TIS Templates group

10.PNG

Attach template to all 4 hosts

If needed, disable items for services you don’t want to monitor
If the full error text can’t be seen in the Zabbix dashboard, edit the following file (CentOS 7) and increase the truncation length:

vi /usr/share/zabbix/include/items.inc.php

// apply value mapping
switch ($item['value_type']) {
    case ITEM_VALUE_TYPE_STR:
        $mapping = getMappedValue($value, $item['valuemapid']);
        // break; is not missing here
    case ITEM_VALUE_TYPE_TEXT:
    case ITEM_VALUE_TYPE_LOG:
        if ($trim && mb_strlen($value) > 80) {
            $value = mb_substr($value, 0, 80).'...';

# restart the Zabbix server service

systemctl restart zabbix-server

If all is OK you should see something like this

11.PNG

This is a combination of https://github.com/wawastein/zabbix-cloudwatch and https://github.com/omni-lchen/zabbix-cloudwatch with some modifications from my side (added LLD for Lambda, EBS and Application Load Balancer).

An IAM user has been created with the following 2 IAM policies:

{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"sns:ListSubscriptionsByTopic",
"lambda:ListFunctions",
"sns:GetTopicAttributes",
"lambda:ListVersionsByFunction",
"lambda:ListAliases",
"sns:ListTopics",
"sns:GetPlatformApplicationAttributes",
"sns:ListSubscriptions",
"sns:GetSubscriptionAttributes",
"sns:CheckIfPhoneNumberIsOptedOut",
"sns:ListEndpointsByPlatformApplication",
"sns:ListPhoneNumbersOptedOut",
"sns:GetEndpointAttributes",
"lambda:ListEventSourceMappings",
"sns:ListPlatformApplications",
"sns:GetSMSAttributes"
],
"Resource": "*"
}
]
}
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"elasticmapreduce:ListBootstrapActions",
"logs:DescribeSubscriptionFilters",
"logs:DescribeMetricFilters",
"ec2:DescribeSnapshots",
"ec2:DescribeHostReservationOfferings",
"elasticmapreduce:ListInstanceGroups",
"elasticmapreduce:ListInstances",
"ec2:DescribeVolumeStatus",
"elasticmapreduce:ListSecurityConfigurations",
"ec2:DescribeScheduledInstanceAvailability",
"ec2:DescribeVolumes",
"rds:DownloadDBLogFilePortion",
"ec2:DescribeFpgaImageAttribute",
"ec2:DescribeExportTasks",
"logs:FilterLogEvents",
"ec2:DescribeKeyPairs",
"s3:GetIpConfiguration",
"logs:DescribeDestinations",
"ec2:DescribeReservedInstancesListings",
"elasticmapreduce:DescribeSecurityConfiguration",
"events:DescribeRule",
"s3:GetBucketWebsite",
"ec2:DescribeSpotFleetRequestHistory",
"ec2:DescribeVpcClassicLinkDnsSupport",
"ec2:DescribeSnapshotAttribute",
"elasticmapreduce:ListSteps",
"ec2:DescribeIdFormat",
"s3:GetBucketNotification",
"cloudwatch:GetMetricStatistics",
"s3:GetReplicationConfiguration",
"ec2:DescribeVolumeAttribute",
"events:TestEventPattern",
"ec2:DescribeImportSnapshotTasks",
"rds:DescribeReservedDBInstances",
"ec2:DescribeVpcEndpointServicePermissions",
"ec2:GetPasswordData",
"ec2:DescribeScheduledInstances",
"ec2:DescribeImageAttribute",
"cloudwatch:DescribeAlarms",
"ec2:DescribeReservedInstancesModifications",
"ec2:DescribeSubnets",
"logs:ListTagsLogGroup",
"ec2:DescribeMovingAddresses",
"s3:GetLifecycleConfiguration",
"s3:GetBucketTagging",
"s3:GetInventoryConfiguration",
"ec2:DescribeRegions",
"ec2:DescribeFlowLogs",
"ec2:DescribeVpcEndpointServices",
"ec2:DescribeSpotInstanceRequests",
"ec2:DescribeVpcAttribute",
"cloudwatch:ListMetrics",
"rds:DescribeReservedDBInstancesOfferings",
"elasticmapreduce:DescribeStep",
"cloudwatch:DescribeAlarmHistory",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeNetworkInterfaceAttribute",
"rds:DescribeDBInstances",
"rds:DescribeEngineDefaultClusterParameters",
"ec2:DescribeVpcEndpointConnections",
"rds:DescribeEventCategories",
"ec2:DescribeInstanceStatus",
"rds:DescribeEvents",
"s3:ListBucketMultipartUploads",
"ec2:DescribeHostReservations",
"ec2:DescribeBundleTasks",
"logs:TestMetricFilter",
"ec2:DescribeIdentityIdFormat",
"ec2:DescribeClassicLinkInstances",
"s3:GetBucketVersioning",
"ec2:DescribeVpcEndpointConnectionNotifications",
"ec2:DescribeSecurityGroups",
"rds:DescribeDBSnapshotAttributes",
"ec2:DescribeFpgaImages",
"s3:ListAllMyBuckets",
"rds:ListTagsForResource",
"ec2:DescribeVpcs",
"s3:GetBucketCORS",
"s3:GetObjectVersion",
"ec2:DescribeStaleSecurityGroups",
"s3:GetObjectVersionTagging",
"ec2:DescribeVolumesModifications",
"ec2:GetHostReservationPurchasePreview",
"elasticloadbalancing:DescribeLoadBalancerPolicyTypes",
"rds:DescribeEngineDefaultParameters",
"ec2:DescribePlacementGroups",
"ec2:GetConsoleScreenshot",
"ec2:DescribeInternetGateways",
"s3:GetObjectAcl",
"elasticloadbalancing:DescribeLoadBalancers",
"ec2:GetLaunchTemplateData",
"events:ListRuleNamesByTarget",
"cloudwatch:DescribeAlarmsForMetric",
"ec2:DescribeSpotDatafeedSubscription",
"cloudwatch:ListDashboards",
"s3:GetObjectVersionAcl",
"logs:GetLogEvents",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"ec2:DescribeAccountAttributes",
"events:ListRules",
"ec2:DescribeNetworkInterfacePermissions",
"ec2:DescribeReservedInstances",
"elasticloadbalancing:DescribeInstanceHealth",
"ec2:DescribeNetworkAcls",
"ec2:DescribeRouteTables",
"events:ListTargetsByRule",
"ec2:DescribeEgressOnlyInternetGateways",
"cloudwatch:GetDashboard",
"ec2:DescribeLaunchTemplates",
"rds:DescribeDBSnapshots",
"elasticmapreduce:ViewEventsFromAllClustersInConsole",
"ec2:DescribeVpnConnections",
"ec2:DescribeVpcPeeringConnections",
"ec2:DescribeReservedInstancesOfferings",
"ec2:DescribeVpcEndpointServiceConfigurations",
"rds:DescribeDBSecurityGroups",
"ec2:DescribePrefixLists",
"ec2:GetReservedInstancesExchangeQuote",
"ec2:DescribeInstanceCreditSpecifications",
"ec2:DescribeVpcClassicLink",
"s3:ListMultipartUploadParts",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"events:DescribeEventBus",
"s3:GetObject",
"logs:DescribeExportTasks",
"rds:DescribeOrderableDBInstanceOptions",
"s3:GetAnalyticsConfiguration",
"s3:GetObjectVersionForReplication",
"ec2:DescribeVpcEndpoints",
"ec2:DescribeElasticGpus",
"rds:DescribeCertificates",
"ec2:DescribeVpnGateways",
"rds:DescribeOptionGroups",
"s3:ListBucketByTags",
"ec2:DescribeAddresses",
"rds:DescribeDBEngineVersions",
"rds:DescribeDBSubnetGroups",
"cloudwatch:GetMetricData",
"logs:DescribeLogStreams",
"ec2:DescribeInstanceAttribute",
"s3:ListBucketVersions",
"s3:GetBucketLogging",
"ec2:DescribeDhcpOptions",
"s3:GetAccelerateConfiguration",
"rds:DescribePendingMaintenanceActions",
"rds:DescribeDBParameterGroups",
"elasticmapreduce:DescribeCluster",
"s3:GetBucketPolicy",
"ec2:GetConsoleOutput",
"ec2:DescribeSpotPriceHistory",
"s3:GetObjectVersionTorrent",
"s3:GetEncryptionConfiguration",
"ec2:DescribeNetworkInterfaces",
"s3:GetBucketRequestPayment",
"s3:GetObjectTagging",
"elasticmapreduce:ListClusters",
"s3:GetMetricsConfiguration",
"rds:DescribeDBParameters",
"logs:DescribeResourcePolicies",
"rds:DescribeDBClusterSnapshotAttributes",
"rds:DescribeDBClusterParameters",
"rds:DescribeEventSubscriptions",
"logs:DescribeLogGroups",
"ec2:DescribeIamInstanceProfileAssociations",
"ec2:DescribeTags",
"elasticloadbalancing:DescribeTags",
"ec2:DescribeLaunchTemplateVersions",
"ec2:DescribeImportImageTasks",
"rds:DescribeDBLogFiles",
"ec2:DescribeNatGateways",
"s3:GetBucketAcl",
"ec2:DescribeCustomerGateways",
"ec2:DescribeSpotFleetRequests",
"ec2:DescribeHosts",
"ec2:DescribeImages",
"s3:GetObjectTorrent",
"ec2:DescribeSpotFleetInstances",
"ec2:DescribeSecurityGroupReferences",
"rds:DescribeDBClusterSnapshots",
"rds:DescribeOptionGroupOptions",
"rds:DownloadCompleteDBLogFile",
"s3:GetBucketLocation",
"ec2:DescribeConversionTasks",
"rds:DescribeDBClusters",
"rds:DescribeAccountAttributes",
"elasticmapreduce:DescribeJobFlows",
"rds:DescribeDBClusterParameterGroups"
],
"Resource": "*"
}
]
}
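
The user and the two policies can be created in the IAM console or, as a sketch, with the AWS CLI (user and policy names are placeholders, and the two JSON documents above are assumed to be saved locally):

aws iam create-user --user-name zabbix-cloudwatch
aws iam put-user-policy --user-name zabbix-cloudwatch --policy-name zabbix-lld --policy-document file://policy-lld.json
aws iam put-user-policy --user-name zabbix-cloudwatch --policy-name zabbix-describe --policy-document file://policy-describe.json
aws iam create-access-key --user-name zabbix-cloudwatch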

Prerequisites:

yum install epel-release
yum install python-pip
yum install jq
pip install boto
pip install boto3

Unzip the cloudwatch zip file and copy its content:

aws.discovery, awsLLD.sh and cloudwatch.metric go to /usr/lib/zabbix/externalscripts; make sure the files are executable (chmod +x).

Unzip the scripts.zip content (it's a folder named scripts) and copy that folder to /usr/lib/zabbix (as in the picture below; enter the IAM user credentials in the aws.conf file)

1.PNG

Copy the content of cloudwatch_aws.zip (the cloudwatch folder) to /opt/zabbix (create that folder if it doesn't exist)

Inside this folder there is a file named awscred; enter the IAM user credentials there (I was too lazy to point both scripts to the same credentials file 🙂 )

Make sure the following files are set as executable

2.PNG

3.PNG

Test if it works:

/usr/lib/zabbix/scripts/aws_discovery.py --account default --region eu-west-1 --service s3
/usr/lib/zabbix/scripts/aws_discovery.py --account default --region eu-west-1 --service rds
[root@ip-172-31-27-77 scripts]# ./aws_discovery.py --account default --region eu-west-1 --service s3
{"data": [{"{#BUCKET_NAME}": "bucket1"}, {"{#BUCKET_NAME}": "bucket2"}]}

[root@ip-172-31-27-77 scripts]# /usr/lib/zabbix/scripts/aws_discovery.py --account default --region eu-west-1 --service rds
{"data": [{"{#RDS_ID}": "mydb", "{#STORAGE}": 111111}, {"{#RDS_ID}": "mytestore", "{#STORAGE}": 11111},]}

/opt/zabbix/cloudwatch/zabbix-cloudwatch/awsLLD.py -a 'default' -r 'eu-west-1' -q 'ApplicationELB' -c ''
/opt/zabbix/cloudwatch/zabbix-cloudwatch/awsLLD.py -a 'default' -r 'eu-west-1' -q 'EBS' -c ''
/opt/zabbix/cloudwatch/zabbix-cloudwatch/awsLLD.py -a 'default' -r 'eu-west-1' -q 'SNSTopics' -c ''
/opt/zabbix/cloudwatch/zabbix-cloudwatch/awsLLD.py -a 'default' -r 'eu-west-1' -q 'LambdaFunction' -c ''

./awsLLD.py -a 'default' -r 'eu-west-1' -q 'LambdaFunction' -c ''
{
"data": [
{
"{#AWS_REGION}": "eu-west-1",
"{#AWS_ACCOUNT}": "default",
"{#FUNCTION_INAME}": "myfunction",
"{#FUNCTION_NAME}": "myfunction"
}]
}

If something is wrong, probably some prerequisites are not installed properly, files/folders were copied to the wrong path, or some scripts are missing the +x flag

Creating Zabbix hosts

Create Zabbix hosts for every AWS region where your services reside

4.PNG

Attaching Zabbix templates

https://1drv.ms/u/s!AizscpxS0QM4hJ0d_JvivLGeu8nWxg

Create a full clone of the template for every region and attach it to the hosts.

Every template has a macro with the AWS region; change it if needed

4.PNG

Create cron jobs for every resource you want to monitor (Application Load Balancer, EBS, SNS and Lambda):

# Lambda monitoring
# -- Ireland
*/15 * * * * /opt/zabbix/cloudwatch/zabbix-cloudwatch/cron.d/cron.Lambda.sh "mylambda" "Ireland" "localhost" "default" "eu-west-1" &>/dev/null

# SNS monitoring
# -- London
*/10 * * * * /opt/zabbix/cloudwatch/zabbix-cloudwatch/cron.d/cron.SNS.sh "aws-config" "London" "localhost" "default" "eu-west-2" &>/dev/null

# Application Load Balancer monitoring
# -- Ireland
*/10 * * * * /opt/zabbix/cloudwatch/zabbix-cloudwatch/cron.d/cron.ApplicationELB.sh "app/loadbalancer/" "Ireland" "localhost" "default" "eu-west-1"

# EBS monitoring
# -- North Virginia
*/12 * * * * /opt/zabbix/cloudwatch/zabbix-cloudwatch/cron.d/cron.EBS.sh "vol-11111111" "aws_north_virginia" "localhost" "default" "us-east-1" &>/dev/null

# -- Ireland
*/10 * * * * /opt/zabbix/cloudwatch/zabbix-cloudwatch/cron.d/cron.EBS.sh "vol-059d78926c41b79c4" "Ireland" "localhost" "default" "eu-west-1" &>/dev/null

 

Make sure all files in /opt/zabbix/cloudwatch/zabbix-cloudwatch/cron.d are executable
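
For example (assuming the wrappers keep the .sh names used in the cron entries above):

chmod +x /opt/zabbix/cloudwatch/zabbix-cloudwatch/cron.d/*.sh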

 

5.PNG