Backup AWS Route 53 using AWS Lambda
Need a way to automatically back up your AWS Route53 public DNS zones? Look no further, as a combination of the following AWS products can fit the need:
- Lambda
- Route53
- CloudWatch
- S3
This will execute a Lambda function every 6 hours (or whichever you set the CloudWatch event to). It will use the IAM role to export your Route53 public zones as a CSV & JSON to the S3 bucket of your choice.
- Create a S3 private bucket, as it will be your destination for the backups.
- Set the s3_bucket_name variable to your AWS S3 bucket name.
- Set the s3_bucket_region variable to your AWS S3 region.
- Create an IAM role with an attached policy granting Route53 read-only access and S3 read/write access to your S3 bucket. An example (working) IAM policy is below:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "Stmt1595207085101",
            "Action": [
                "route53:Get*",
                "route53:List*"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:route53:::example/zone"
        },
        {
            "Sid": "Stmt1595207429910",
            "Action": [
                "route53:List*"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:route53:::*"
        },
        {
            "Sid": "Stmt1595207166058",
            "Action": [
                "s3:*"
            ],
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::example/*"
        }
    ]
}
- Create a CloudWatch event for every 6 hours (or desired recurring duration).
- Upload the below Lambda Python function (copy and save it as aws_s3_route53.py for example).
- Assign the function's execution role to the IAM role created above, and use the scheduled CloudWatch event trigger you created as the function's trigger.
- Check the S3 bucket for your backups and verify.
Python Code
1
2 """AWS Route 53 Lambda Backup"""
3
4 import os
5 import csv
6 import json
7 import time
8 from datetime import datetime
9 import boto3
10 from botocore.exceptions import ClientError
11
12
# Set environmental variables
#
# s3_bucket_name / s3_bucket_region are read from the Lambda function's
# environment; they default to '' so the module still imports (with a
# warning) when they are not configured.

s3_bucket_name = ''
s3_bucket_region = ''

try:
    s3_bucket_name = os.environ['s3_bucket_name']
    s3_bucket_region = os.environ['s3_bucket_region']
except KeyError:
    print("Warning: Environmental variable(s) not defined")


# Create client objects
#
# Fix: the S3 client previously hard-coded region_name='us-east-1' even
# when s3_bucket_region pointed elsewhere; use the configured region and
# fall back to us-east-1 when it is unset (preserving the old default).

s3 = boto3.client('s3', region_name=s3_bucket_region or 'us-east-1')
route53 = boto3.client('route53')
29
30
31 # Functions
32
def create_s3_bucket(bucket_name, bucket_region='us-east-1'):
    """Create an Amazon S3 bucket.

    Returns the existing bucket's HEAD response if it already exists,
    the CreateBucket response if it was created, or None on error.
    """
    # Reuse the bucket if it already exists (HEAD succeeds).
    try:
        return s3.head_bucket(Bucket=bucket_name)
    except ClientError as err:
        if err.response['Error']['Code'] != '404':
            # Anything other than "not found" (e.g. 403) is a real
            # problem -- report it and signal failure.
            print(err)
            return None
    # 404: the bucket is missing, so create it. Buckets in us-east-1
    # (N. Virginia) must be created WITHOUT a CreateBucketConfiguration
    # parameter.
    if bucket_region == 'us-east-1':
        return s3.create_bucket(ACL='private', Bucket=bucket_name)
    return s3.create_bucket(
        ACL='private',
        Bucket=bucket_name,
        CreateBucketConfiguration={'LocationConstraint': bucket_region},
    )
58
59
def upload_to_s3(folder, filename, bucket_name, key):
    """Upload a file to a folder in an Amazon S3 bucket."""
    # The object key is "<folder>/<key>" inside the bucket.
    object_key = folder + '/' + key
    s3.upload_file(filename, bucket_name, object_key)
64
65
def get_route53_hosted_zones(next_zone=None):
    """Return the full list of hosted zones in Amazon Route 53.

    Follows pagination (list_hosted_zones_by_name returns at most 100
    zones per call) by iterating on NextDNSName/NextHostedZoneId.
    """
    hosted_zones = []
    while True:
        if next_zone:
            response = route53.list_hosted_zones_by_name(
                DNSName=next_zone[0],
                HostedZoneId=next_zone[1],
            )
        else:
            response = route53.list_hosted_zones_by_name()
        hosted_zones.extend(response['HostedZones'])
        if not response['IsTruncated']:
            return hosted_zones
        # Truncated: continue from the zone name/id the API handed back.
        next_zone = (response['NextDNSName'], response['NextHostedZoneId'])
83
84
def get_route53_zone_records(zone_id, next_record=None):
    """Return all record sets of a hosted zone in Route 53.

    Follows pagination (list_resource_record_sets is truncated at 300
    records per call) by iterating on NextRecordName/NextRecordType.
    """
    zone_records = []
    while True:
        if next_record:
            response = route53.list_resource_record_sets(
                HostedZoneId=zone_id,
                StartRecordName=next_record[0],
                StartRecordType=next_record[1],
            )
        else:
            response = route53.list_resource_record_sets(HostedZoneId=zone_id)
        zone_records.extend(response['ResourceRecordSets'])
        if not response['IsTruncated']:
            return zone_records
        # Truncated: continue from the record name/type the API handed back.
        next_record = (response['NextRecordName'], response['NextRecordType'])
104
105
def get_record_value(record):
    """Return a list of values for a hosted zone record.

    An Alias record is flattened to a single
    'ALIAS:<hosted zone id>:<dns name>' string; a standard record yields
    one entry per resource record (e.g. several values for MX).
    A record carrying neither key yields an empty list.
    """
    if 'AliasTarget' in record:
        alias = record['AliasTarget']
        return [':'.join(['ALIAS', alias['HostedZoneId'], alias['DNSName']])]
    # Fix: the original raised an unhandled KeyError when
    # 'ResourceRecords' was also absent, aborting the entire backup run
    # on one unusual record.
    return [v['Value'] for v in record.get('ResourceRecords', [])]
119
120
def try_record(test, record):
    """Return record[test], or '' when the lookup fails.

    Swallows KeyError (key absent) and TypeError (record is not a
    mapping, e.g. the '' returned by a previous failed lookup).
    """
    try:
        return record[test]
    except (KeyError, TypeError):
        return ''
131
132
def write_zone_to_csv(zone, zone_records):
    """Write hosted zone records to a csv file in /tmp/.

    Returns the path of the file written. Because zone['Name'] ends
    with a trailing dot, the concatenation yields '<name>.csv'.
    """
    zone_file_name = '/tmp/' + zone['Name'] + 'csv'
    with open(zone_file_name, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        # Column headers first.
        writer.writerow([
            'NAME', 'TYPE', 'VALUE',
            'TTL', 'REGION', 'WEIGHT',
            'SETID', 'FAILOVER', 'EVALUATE_HEALTH'
        ])
        for record in zone_records:
            # Build the row once; the VALUE column (index 2) is filled
            # per value below.
            row = [
                record['Name'],
                record['Type'],
                '',
                try_record('TTL', record),
                try_record('Region', record),
                try_record('Weight', record),
                try_record('SetIdentifier', record),
                try_record('Failover', record),
                try_record('EvaluateTargetHealth',
                           try_record('AliasTarget', record)),
            ]
            # Multiple values (e.g. MX records) each get their own row.
            for value in get_record_value(record):
                row[2] = value
                writer.writerow(row)
    return zone_file_name
164
165
def write_zone_to_json(zone, zone_records):
    """Write hosted zone records to a json file in /tmp/.

    Returns the path of the file written. Because zone['Name'] ends
    with a trailing dot, the concatenation yields '<name>.json'.
    """
    zone_file_name = '/tmp/' + zone['Name'] + 'json'
    with open(zone_file_name, 'w') as json_file:
        json.dump(zone_records, json_file, indent=4)
    return zone_file_name
173
174
175 ## HANDLER FUNCTION ##
176
def lambda_handler(event, context):
    """Handler function for AWS Lambda.

    Ensures the backup bucket exists, then exports every Route 53
    hosted zone as both CSV and JSON into a timestamped folder inside
    the bucket. Returns True on success, False when the bucket could
    not be created. The event/context arguments are unused.
    """
    time_stamp = time.strftime("%Y-%m-%dT%H:%M:%SZ",
                               datetime.utcnow().utctimetuple())
    if not create_s3_bucket(s3_bucket_name, s3_bucket_region):
        return False
    # (Removed dead commented-out duplicate of the bucket check.)
    for zone in get_route53_hosted_zones():
        # Zone names end with a trailing dot; strip it for the folder.
        zone_folder = time_stamp + '/' + zone['Name'][:-1]
        zone_records = get_route53_zone_records(zone['Id'])
        upload_to_s3(
            zone_folder,
            write_zone_to_csv(zone, zone_records),
            s3_bucket_name,
            zone['Name'] + 'csv'
        )
        upload_to_s3(
            zone_folder,
            write_zone_to_json(zone, zone_records),
            s3_bucket_name,
            zone['Name'] + 'json'
        )
    return True
204
205
if __name__ == "__main__":
    # Allow ad-hoc local runs; the handler ignores event/context.
    lambda_handler(0, 0)
Now you can sleep a bit more peacefully knowing that when/if you blow out a record set in your hosted public zone, you'll have a backup!
Recommendations for proper handling
As a general recommendation, it may take an extended duration to back up these records. I recommended increasing the Lambda function timeout settings:
Timeout – The amount of time that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds.
You should increase this to at least 120 seconds
(2 minutes).