Archive for the ‘Linux’ Category

In the previous post we deployed a single machine from a Python script using the python-terraform plugin.

In this one we'll go through a JSON file, extract the username and the number of instances for each user, and create that many instances for that user.

With the file below, one instance will be created for DJukes, two for JWilson and three for eflame.

JSON file:

{
  "squadName": "Super hero squad",
  "homeTown": "Metro City",
  "formed": 2016,
  "secretBase": "Super tower",
  "active": true,
  "customers": [
    {
      "name": "Molecule Man",
      "age": 29,
      "email": "DJukes@gmail.com",
      "instances": 1,
      "powers": [
        "Radiation resistance",
        "Turning tiny",
        "Radiation blast"
      ]
    },
    {
      "name": "Madame Uppercut",
      "age": 39,
      "email": "JWilson@gmail.com",
      "instances": 2,
      "powers": [
        "Million tonne punch",
        "Damage resistance",
        "Superhuman reflexes"
      ]
    },
    {
      "name": "Eternal Flame",
      "age": 1000000,
      "email": "eflame@gmail.com",
      "instances": 3,
      "powers": [
        "Immortality",
        "Heat Immunity",
        "Inferno",
        "Teleportation",
        "Interdimensional travel"
      ]
    }
  ]
}
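
As a quick preview of the parsing step, here is a minimal sketch (assuming the file is saved as my.json, the name used by the full script further down) that derives the username from the email address and reads the instance count:

#!/bin/python
import json

# load the squad file used throughout this post
with open('./my.json') as json_data:
    data = json.load(json_data)

for customer in data['customers']:
    # the username is everything before the @ in the email address
    user = customer['email'].split('@')[0]
    count = customer['instances']
    print("%s -> %d instance(s)" % (user, count))

# expected output: DJukes -> 1, JWilson -> 2, eflame -> 3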

Terraform files:

Get the public IPs:

output.tf

output "id" {
  description = "List of public IPs of instances"
  value       = ["${aws_instance.win-example.*.public_ip}"]
}

Security group (sg.tf): for each user the string allow-all will be replaced with that user's name, so a new security group is created per user and named after the username.

resource "aws_security_group" "allow-all" {
name="allow-all"
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 0
to_port = 6556
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
tags {
Name = "allow-RDP"
}
}

vars.tf

variable "AWS_REGION" {
  default = "eu-west-1"
}
variable "WIN_AMIS" {
  type = "map"
  default = {
    us-east-1 = "ami-30540427"
    us-west-2 = "ami-9f5efbff"
    eu-west-1 = "ami-cc821eb5"
  }
}


variable "count" {

default="1"

}



variable "PATH_TO_PRIVATE_KEY" {
  default = "mykey"
}
variable "PATH_TO_PUBLIC_KEY" {
  default = "mykey.pub"
}
variable "INSTANCE_USERNAME" {
#  default = "Terraform"
}
variable "INSTANCE_PASSWORD" {
 default="Passw0rd012345"

}

windows.tf

resource "aws_instance" "win-example" {
  ami = "${lookup(var.WIN_AMIS, var.AWS_REGION)}"
  instance_type = "t2.medium"
  count="${var.count}"
  lifecycle {
ignore_changes="ami"

}

   
  vpc_security_group_ids=["${aws_security_group.allow-all.id}"]
#key_name = "${aws_key_pair.mykey.key_name}"
  user_data = <
net user ${var.INSTANCE_USERNAME} '${var.INSTANCE_PASSWORD}' /add /y
net localgroup administrators ${var.INSTANCE_USERNAME} /add

winrm quickconfig -q
winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="300"}'
winrm set winrm/config '@{MaxTimeoutms="1800000"}'
winrm set winrm/config/service '@{AllowUnencrypted="true"}'
winrm set winrm/config/service/auth '@{Basic="true"}'

netsh advfirewall firewall add rule name="WinRM 5985" protocol=TCP dir=in localport=5985 action=allow
netsh advfirewall firewall add rule name="WinRM 5986" protocol=TCP dir=in localport=5986 action=allow

net stop winrm
sc.exe config winrm start=auto
net start winrm

EOF

  provisioner "file" {
    source = "test.txt"
    destination = "C:/test.txt"
  }
  connection {
    type = "winrm"
    timeout = "10m"
    user = "${var.INSTANCE_USERNAME}"
     password = "${var.INSTANCE_PASSWORD}"
      
}

tags {
Name="${format("${var.INSTANCE_USERNAME}-%01d",count.index+1)}"

}
}

For every user a new folder is created and all of the Terraform files above are copied into it. Once copied, the sg.tf and windows.tf files are searched for "allow-all" and that string is replaced with the username, because each user gets a security group of their own.

#!/bin/python
import json
import os.path
import shutil
from python_terraform import *

#open JSON file
json_data = open('./my.json')
data = json.load(json_data)
json_data.close()

#Function which will create instances; parameters are the working directory,
#the username and the count of instances fetched from the JSON file
def myfunc(final_path, user, count):
    tf = Terraform(working_dir=final_path, variables={'count': count, 'INSTANCE_USERNAME': user})
    tf.plan(no_color=IsFlagged, refresh=True, capture_output=False)
    approve = {"auto-approve": True}
    print(tf.init(reconfigure=True))
    print(tf.plan())
    print(tf.apply(**approve))
    return

# sweep through JSON file and store username and number of instances into user and count variables
for i in range(0, len(data['customers'])):
    k = data['customers'][i]['email']
    # username is the part of the email address before the @
    user = k.split('@')[0]
    count = data['customers'][i]['instances']

    #define "root" directory
    start_path = "/home/ja/terraform-course/demo-2b/"

    #in order to avoid instance recreation, a folder for each user needs to be created

    #define subdirectory named by user and create it if the folder doesn't exist
    final_path = os.path.join(start_path, user)
    if not os.path.exists(final_path):
        os.makedirs(final_path)

    #copy terraform files to each newly created user folder
    shutil.copy2('./vars.tf', final_path)
    shutil.copy2('./sg.tf', final_path)
    shutil.copy2('./windows.tf', final_path)
    shutil.copy2('./provider.tf', final_path)
    shutil.copy2('./test.txt', final_path)
    shutil.copy2('./output.tf', final_path)

    #for each user a new security group needs to be created; its name will be the username
    final = os.path.join(final_path, 'sg.tf')
    final1 = os.path.join(final_path, 'windows.tf')

    #replace current name (allow-all) with the username in sg.tf and windows.tf files
    with open(final, 'r') as file:
        filedata = file.read()
    filedata = filedata.replace('allow-all', user)
    with open(final, 'w') as file:
        file.write(filedata)
    with open(final1, 'r') as file:
        filedata = file.read()
    filedata = filedata.replace('allow-all', user)
    with open(final1, 'w') as file:
        file.write(filedata)

    #call function for running terraform
    myfunc(final_path, user, count)

    #in each user folder open terraform.tfstate file and extract public IP to variable ip
    final2 = os.path.join(final_path, 'terraform.tfstate')
    json_data = open(final2)
    data1 = json.load(json_data)
    json_data.close()

    #write public IP, username and password to /home/ja/terraform-course/demo-2b/<user>.txt file
    filename = "/home/ja/terraform-course/demo-2b/" + user + ".txt"
    print(filename)
    with open(filename, 'wt') as out:
        for j in range(0, len(data1['modules'])):
            ip = ','.join(data1['modules'][j]['outputs']['id']['value'])
            out.write("Username is: " + user + ". Password is: Passw0rd012345. IP address is: " + ip + "\n")

 


python-terraform is a Python module that provides a wrapper around the terraform command-line tool. More details are available on the project's GitHub page.

Installation is simple:

pip install python-terraform
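
A minimal sketch of the workflow the wrapper gives you (the working directory below is just a placeholder for a folder that already contains .tf files):

#!/bin/python
from python_terraform import Terraform, IsFlagged

# point the wrapper at a folder containing Terraform files (placeholder path)
tf = Terraform(working_dir='/path/to/terraform/project')

print(tf.init())                            # terraform init
print(tf.plan(no_color=IsFlagged))          # terraform plan
print(tf.apply(**{"auto-approve": True}))   # terraform apply -auto-approve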

Now we can use a Python script to interact with Terraform. In this example we'll pass the number of instances as a variable to the Python script and that many new instances will be created.

Python script

 

#!/bin/python
from python_terraform import *

# the number of instances is read interactively and passed to terraform as the count variable
enter = int(input('Enter number of instances: '))

tf = Terraform(working_dir='/home/ja/terraform/demo-3', variables={'count': enter})
approve = {"auto-approve": True}
print(tf.plan(no_color=IsFlagged, refresh=False))
print(tf.apply(**approve))

 

variables={'count': enter}

count is the variable name declared in the vars.tf file; enter is the Python variable that holds the number of instances entered interactively.

Because enter is a Python variable rather than a literal value, it is written without quotes; only the key 'count' is quoted. A fixed string value, on the other hand, would also need quotes.
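
For example (the INSTANCE_USERNAME value here is only an illustration):

enter = 3

# dict keys are Terraform variable names and are always quoted strings;
# enter is a Python variable so it is not quoted, while a fixed string
# value such as 'demo' is
variables = {'count': enter, 'INSTANCE_USERNAME': 'demo'}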

 

Running the script above will spin up as many instances as we specified at the prompt.


 

Files in the /home/ja/terraform/demo-3 folder:

instances.tf

resource "aws_instance" "example" {
  ami = "${lookup(var.AMIS, var.AWS_REGION)}"
  instance_type = "t2.micro"

count="${var.count}"

tags {
Name="${format("test-%01d",count.index+1)}"
}

 output "ime" {
   value = ["${aws_instance.example.*.tags.Name}","${aws_instance.example.*.public_ip}"]
}

 

vars.tf (variable file)

 

variable "AWS_ACCESS_KEY" {
}

variable "count" {
default=2
}

variable "AWS_SECRET_KEY" {
}
variable "AWS_REGION" {
  default = "eu-west-1"
}
variable "AMIS" {
  type = "map"
  default = {
    us-east-1 = "ami-13be557e"
    us-west-2 = "ami-06b94666"
    eu-west-1 = "ami-844e0bf7"
  }
}

 

provider.tf

 

provider "aws" {
    access_key = "${var.AWS_ACCESS_KEY}"
    secret_key = "${var.AWS_SECRET_KEY}"
    region = "${var.AWS_REGION}"
}

 

Creating Rundeck ACL policies

Posted: February 9, 2018 in Linux, RunDeck

Creating role

vi /var/lib/rundeck/exp/webapp/WEB-INF/web.xml

search for the security-role section


Creating a user

The format is

username:password,rolename

vi /etc/rundeck/realm.properties
demo:demo,user,demo

We created the user demo with the password demo and assigned it to the user and demo roles.

Creating policy

In this example, we'll create a policy that allows the demo role to see only the aws project.

-c Context: either 'project' or 'application'.

-c application   Access to projects, users, storage, system info, execution management.

-c project   Access to jobs, nodes, events within a project.

-a allow, where the actions are:

  • Reading: read
  • Deleting: delete
  • Configuring: configure
  • Importing archives: import
  • Exporting archives: export
  • Deleting executions: delete_execution
  • Exporting a project to another Rundeck instance: promote
  • Full access: admin

-g group

-p project

-j job (read,update,delete,run,runAs,kill,killAs,create)

 

Access to projects (read-only)

rd-acl is a tool that generates policy YAML which we can append to a policy file (usually /etc/rundeck/admin.aclpolicy):

rd-acl create -c application -g demo -p aws -a read,delete,import>>/etc/rundeck/admin.aclpolicy

Command output:

---
by:
  group: demo
context:
  application: rundeck
for:
  project:
  - allow:
    - read
    - import
    - delete
    equals:
      name: aws
description: generated

Members of the demo role will be able to see only the aws project.


If we need the role to have access to multiple projects, we just append another block like the following to the /etc/rundeck/admin.aclpolicy file (this one also grants access to the demo project):

---
by:
  group: demo
context:
  application: rundeck
for:
  project:
  - allow:
    - read
    - import
    - delete
    equals:
      name: demo
description: generated

Access to jobs

If we want to allow access to particular jobs, we type the following:

rd-acl create -c project -p aws -g demo -j job2 -a read,run,kill>> /etc/rundeck/admin.aclpolicy

Code added to policy file:

---
by:
  group: demo
context:
  project: aws
for:
  job:
  - allow:
    - read
    - run
    - kill
    equals:
      name: 'job2'

Access to Activity tab

-G generic resource kind (node, event, or job)
-G event (read,create)

rd-acl create -c project -p aws -g demo -G event -a read >> /etc/rundeck/admin.aclpolicy

Code in policy:

---
by:
  group: demo
context:
  project: aws
for:
  resource:
  - allow: read
    equals:
      kind: event
description: generated

 

 


 

Access to nodes

 

-G node (read,create,update,refresh)

rd-acl create -c project -p aws -g demo -G node -a read>> /etc/rundeck/admin.aclpolicy

Policy code:

---
by:
  group: demo
context:
  project: aws
for:
  resource:
  - allow: read
    equals:
      kind: node
description: generated

Node access can also be allowed based on a node tag, using -t (read,create,update,refresh):

rd-acl create -c project -p aws -g demo -G node -t prod -a read,refresh
---
by:
  group: demo
context:
  project: aws
for:
  node:
  - allow:
    - read
    - refresh
    contains:
      tags:
      - prod
description: generated

Now, users who belong to the demo role can see only nodes with the tag prod.
 

 

In the last post we added a node to Rundeck; now we'll add an EC2 instance as a node.

First, we need to add the AWS EC2 plugin:

cd /var/lib/rundeck/libext/
wget https://github.com/rundeck-plugins/rundeck-ec2-nodes-plugin/releases/download/v1.5.1/rundeck-ec2-nodes-plugin-1.5.1.jar
systemctl restart rundeckd

Now create a New Project, then Add Source and choose AWS EC2 Resources.


Specify the Access Key, Secret Key and Endpoint (for the list of endpoints refer to https://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)

In the mapping parameters field specify:

name.selector=tags/Name;

hostname.selector=publicDnsName;

description.default=Ec2 node instance;

osArch.selector=architecture;

osFamily.selector=platform;

osFamily.default=unix;

osName.selector=platform;

osName.default=Linux;

username.selector=tags/Rundeck-User;

username.default=root;

ssh-keypath.default=/var/lib/rundeck/.ssh/id_rsa;

editUrl.default=https://console.aws.amazon.com/ec2/home#c=EC2&s=Instances;

attribute.publicIpAddress.selector=publicIpAddress;

attribute.publicDnsName.selector=publicDnsName;

tags.selector=tags/Rundeck-Tags

Click Save; the EC2 node(s) should now be visible in Rundeck.

The resulting line in the project properties:

resources.source.2.config.mappingParams=name.selector\=tags/Name;hostname.selector\=publicDnsName;description.default\=Ec2 node instance;osArch.selector\=architecture;osFamily.selector\=platform;osFamily.default\=unix;osName.selector\=platform;osName.default\=Linux;username.selector\=tags/Rundeck-User;username.default\=root;ssh-keypath.default\=/var/lib/rundeck/.ssh/id_rsa;editUrl.default\=https\://console.aws.amazon.com/ec2/home\#c\=EC2&s\=Instances;attribute.publicIpAddress.selector\=publicIpAddress;attribute.publicDnsName.selector\=publicDnsName;tags.selector\=tags/Rundeck-Tags


On the Rundeck server, if not already done, create a key pair:

ssh-keygen -t rsa
cp /root/.ssh/id_rsa /var/lib/rundeck/.ssh/id_rsa
cp /root/.ssh/id_rsa.pub /var/lib/rundeck/.ssh/id_rsa.pub

Now copy the content of id_rsa.pub to /root/.ssh/authorized_keys on the EC2 instance.

In the Rundeck GUI, click on the project, then Nodes; the EC2 instance should be visible.

 


 

Also, commands can now be executed on the node.

 


Rundeck is open-source software that helps automate routine operational procedures in data center or cloud environments.

Installation:

Rundeck can be configured to use a relational database instead of the default file-based data storage; a database is recommended for large environments. In this post we'll use file-based storage.

Rundeck requires Java:

# yum install java-1.8.0-openjdk java-1.8.0-openjdk-devel -y

Create a java.sh file in /etc/profile.d and add the content below:

#!/bin/bash
# JAVA_HOME must point to the JDK install directory (not the java binary);
# adjust the path to match the installed OpenJDK version
JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk
PATH=$JAVA_HOME/bin:$PATH
export PATH JAVA_HOME
export CLASSPATH=.

Then make the file executable and load it:

chmod +x /etc/profile.d/java.sh
source /etc/profile.d/java.sh

Rundeck listens on port 4440, so that port needs to be open.

Add the line below to /etc/sysconfig/iptables and restart iptables:

-A INPUT -p tcp -m state --state NEW -m tcp --dport 4440 -j ACCEPT
/etc/init.d/iptables restart

Installing Rundeck:

rpm -Uvh http://repo.rundeck.org/latest.rpm 
yum install rundeck
/etc/init.d/rundeckd start

To make sure the service is running:

/etc/init.d/rundeckd status
netstat -anp | grep '4440\|4443'

The default username and password are admin:admin; if the admin password needs to be changed, edit the file /etc/rundeck/realm.properties.

Update the following line in /etc/rundeck/rundeck-config.properties:

# Change this from:
grails.serverURL=http://localhost:4440

# To:
grails.serverURL=http://ip address:4440

Modify the below lines in file: /etc/rundeck/framework.properties

framework.server.name = localhost
framework.server.hostname = localhost
framework.server.port = 4440
framework.server.url = http://localhost:4440

to

framework.server.name = ip address
framework.server.hostname = ip address
framework.server.port = 4440
framework.server.url = http://ip address:4440

Now restart the service and try to log in at http://ipaddress:4440

Adding nodes

At the moment there is no feature that allows adding nodes from the GUI:
https://github.com/rundeck/rundeck/issues/1584

Create New project


Clear SSH key path


And click Create


Go to /var/rundeck/projects/<project name>/etc and edit the resources.xml file.

Add a node line, like the ones in the resources.xml example below, for every new node (a server which needs to be managed).

 


The new node appears in the web interface.


To add another node, just copy a node line and change the name and the node IP address.

 

<?xml version="1.0" encoding="UTF-8"?>
<project>
  <node name="104.40.229.72" description="Rundeck server node" tags="" hostname="13.93.50.94" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.10.0-693.11.6.el7.x86_64" username="rundeck"/>
  <node name="node1" description="My First Node" tags="node1" hostname="40.68.243.65" osArch="amd64" osFamily="unix" osName="Linux" username="root" ssh-key-storage-path="keys/Linuxtopic/server.1key"/>
  <node name="node2" description="My Second Node" tags="node2" hostname="52.166.238.107" osArch="amd64" osFamily="unix" osName="Linux" username="root" ssh-key-storage-path="keys/Linuxtopic/server.1key"/>
</project>

 

Creating keypair on Rundeck server

ssh-keygen

Copy the private key to the clipboard:

cat /root/.ssh/id_rsa

Now, in the Rundeck interface, click Settings (the cog icon), then Key Storage.

 


Click Add or Upload a Key

 


 

Make sure Private Key is selected from the drop-down list, paste the content of ~/.ssh/id_rsa and give the key a name. Note: the storage path and key name must match the names used in the /var/rundeck/projects/<project name>/etc/resources.xml file
(ssh-key-storage-path="keys/Linuxtopic/server.1key")

Instead of private/public keys, a password can be used as the authentication method.

 


 

On the client (node), create the authorized_keys file (under /root/.ssh).
Copy the content of the id_rsa.pub file (public key) from the Rundeck server to the authorized_keys file on the node machine.
Repeat the same step for every new node (copy the public key from the Rundeck server to the /root/.ssh/authorized_keys file on every node).

Running command

Now that we have added a node, we can run a command on it: from the Rundeck server go to Commands and type a command.
Under nodes, type the node name and click Run on the node.

 


 

Key storage

The private keys uploaded to the Rundeck server in the previous steps are stored locally on the Rundeck server, in the /var/lib/rundeck/var/storage/content/keys/<key path>/ folder.

 


Amazon Route 53 (Route 53) is a scalable and highly available Domain Name System (DNS). It is part of Amazon.com’s cloud computing platform, Amazon Web Services (AWS). The name is a reference to TCP or UDP port 53, where DNS server requests are addressed

route53.tf file

In this file the DNS zone astrahome.xyz is created, together with two A records (one for www and one for server1) and one MX record. The TTL (time-to-live) determines how frequently resolvers refresh your DNS records. The MX records point to the Google mail servers.

104.236.247.8 represents the public IP address.

The last section just outputs the Amazon name servers.

resource "aws_route53_zone" "some-zone" {
name = "astrahome.xyz"
}
resource "aws_route53_record" "server1-record" {
zone_id = "${aws_route53_zone.some-zone.zone_id}"
name = "server1.astrahome.xyz"
type = "A"
ttl = "300"
records = ["104.236.247.8"]
}
resource "aws_route53_record" "www-record" {
zone_id = "${aws_route53_zone.some-zone.zone_id}"
name = "www.astrahome.xyz"
type = "A"
ttl = "300"
records = ["104.236.247.8"]
}
resource "aws_route53_record" "mail1-record" {
zone_id = "${aws_route53_zone.some-zone.zone_id}"
name = "aztrahome.xyz"
type = "MX"
ttl = "300"
records = [
"1 aspmx.l.google.com.",
"5 alt1.aspmx.l.google.com.",
"5 alt2.aspmx.l.google.com.",
"10 aspmx2.googlemail.com.",
"10 aspmx3.googlemail.com."
]
}

output "ns-servers" {
value = "${aws_route53_zone.some-zone.name_servers}"
}

provider.tf specifies the AWS region:

provider "aws" {
    region = "${var.AWS_REGION}"
}

vars.tf is the variable file; in this case we defined only one variable, the AWS region:

variable "AWS_REGION" {
  default = "eu-west-1"
}

Unlike the previous examples, in this one we don't need a file with AWS credentials, because we can use the AWS CLI tools instead. The CLI requires Python; since I used CentOS minimal, I first had to install Python and then the AWS CLI.

Now the keys are located on the local machine (ls ~/.aws), so there is no need to store them in a Terraform file. Route 53 is then deployed using Terraform.


 

We can then check the AWS console.


 

In one of the previous posts we created multiple EC2 instances using modules. Instead of using modules, we can just add the following lines to the instance.tf file:

count = X (where X is the number of instances)
tags {
  Name = "${format("test-%01d", count.index + 1)}"
}

%01d formats the instance number with a minimum width of one digit, giving test-1, test-2, and so on; to zero-pad the number, increase the digit in front of d (test-%03d would give test-001, test-002, ...), as sketched below. For the sake of simplicity I created just one Terraform file.
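
A quick illustration of the padding behaviour, using Python's printf-style formatting, which follows the same width rules as Terraform's format():

# same printf-style width specifiers that Terraform's format() uses
for i in range(1, 4):
    print("test-%01d" % i)   # test-1, test-2, test-3 (one digit, no padding needed)
    print("test-%03d" % i)   # test-001, test-002, test-003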

provider "aws" {
access_key="access keys here"
secret_key="secret keys here"
region = "eu-west-1"
}

variable "count" {
default=2
}


resource "aws_instance" "example" {
count="${var.count}"
ami = "ami-d834aba1"
instance_type = "t2.micro"
tags { Name="${format("test-%01d",count.index+1)}" }
}

output "ip" {
    value = "${aws_instance.example.*.public_ip}"
}

In this case two instances were created, with the tags test-1 and test-2.
