Good afternoon. I have a Django 1.10 application where I need to create a backup of the database. The copy should be made when the user clicks a button placed in a template, and the copy should then be downloaded to the user's computer.
In my views.py I have the following.
def backup(request):
    subprocess.Popen("mysqldump -u root -p12345 victimas > /home/proyecto/backup.sql")
    subprocess.Popen("gzip -c /home/proyecto/backup.sql > /home/proyecto/backup.gz")
    dataf = open('/home/proyecto/backups/backup.gz', 'r')
    return HttpResponse(dataf.read(), mimetype='application/x-gzip')
But I get the error
[Errno 2] No such file or directory: django mysqldump
Running the commands directly from the console creates the file, and I have checked the permissions of the folder.
I appreciate your help.
As per the Popen documentation, Popen takes a list of arguments. If you pass it a string, the entire string is treated as the name of the command, not as a command followed by its arguments.
Split the string argument using:
import shlex
command_line = "mysqldump -u root -p12345 victimas > /home/proyecto/backup.sql"
args = shlex.split(command_line)
subprocess.Popen(args)
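Note, however, that the redirection (> /home/proyecto/backup.sql) is shell syntax: after shlex.split it is passed to mysqldump as ordinary arguments and no file is created. A minimal sketch of one way to keep the list form and still write the dump file (paths and credentials taken from the question):

import shlex
import subprocess

command_line = "mysqldump -u root -p12345 victimas"
args = shlex.split(command_line)

# Hand Popen the output file explicitly instead of relying on ">"
with open('/home/proyecto/backup.sql', 'w') as dump_file:
    proc = subprocess.Popen(args, stdout=dump_file)
    proc.wait()  # block until mysqldump finishes writing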
I resolved this:
In the Django settings file add:
RUTA = '/path/to_tmp/file/'
In views.py:
import subprocess

from django.core.files.storage import FileSystemStorage
from django.http import HttpResponse

from victimas.settings import DATABASES, RUTA

def backup(request):
    name = DATABASES['default']['NAME']
    passwd = DATABASES['default']['PASSWORD']
    user = DATABASES['default']['USER']
    ruta = RUTA
    # shell=True is needed because the command relies on shell redirection (>)
    proc = subprocess.Popen("mysqldump -u " + user + " -p" + passwd + " " + name + " > " + ruta + "backup.sql", shell=True)
    proc.wait()
    procs = subprocess.Popen("tar -czvf " + ruta + "backup.tar.tgz " + ruta + "backup.sql", shell=True)
    procs.wait()
    fs = FileSystemStorage(ruta)
    with fs.open('backup.tar.tgz') as tar:
        response = HttpResponse(tar, content_type='application/x-gzip')
        response['Content-Disposition'] = 'attachment; filename="backup.tar.tgz"'
        return response
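A variant sketch, in case you want to avoid shell=True and the intermediate .sql file: with list-style arguments, mysqldump can be piped straight into gzip (user, passwd, name and ruta as defined above):

# a sketch: pipe mysqldump's stdout into gzip, writing ruta + "backup.sql.gz"
dump = subprocess.Popen(["mysqldump", "-u", user, "-p" + passwd, name],
                        stdout=subprocess.PIPE)
with open(ruta + "backup.sql.gz", "wb") as out:
    gz = subprocess.Popen(["gzip", "-c"], stdin=dump.stdout, stdout=out)
    dump.stdout.close()  # let mysqldump receive SIGPIPE if gzip exits early
    gz.wait()
    dump.wait()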
I have an existing API in my AWS account. Now I am trying to use Ansible to redeploy the API after introducing resource policy changes.
According to AWS, I need to use the CLI command below to redeploy the API:
- name: deploy API
  command: >
    aws apigateway update-stage --region us-east-1
    --rest-api-id <rest-api-id>
    --stage-name 'stage'
    --patch-operations op='replace',path='/deploymentId',value='<deployment-id>'
Above, the 'deploymentId' from the previous deployment is different after every deployment, which is why I'm trying to capture it as a variable so the redeployment steps can be automated.
I can get the previous deployment information using the CLI below:
- name: Get deployment information
  command: >
    aws apigateway get-deployments
    --rest-api-id 123454ne
    --region us-east-1
  register: deployment_info
And the output looks like this:
deployment_info.stdout_lines:
- '{'
- ' "items": ['
- ' {'
- ' "id": "abcd",'
- ' "createdDate": 1228509116'
- ' }'
- ' ]'
- '}'
I was using deployment_info.items.id as the deploymentId but couldn't make it work. I'm now stuck on how to extract the id from this output so it can be used as the deploymentId in the deployment command.
How can I use this id for the deploymentId in the deployment commands?
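For reference, the registered output is a JSON string in stdout, and items must be looked up with brackets because .items collides with the built-in dict method in Jinja2. A hedged sketch of extracting the id (assuming the first returned item is the deployment you want):

- name: Extract previous deployment id
  set_fact:
    deployment_id: "{{ (deployment_info.stdout | from_json)['items'][0]['id'] }}"

The fact can then be referenced as value='{{ deployment_id }}' in the update-stage command.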
I created a small Ansible module which you might find useful:
#!/usr/bin/python
# Creates a new deployment for an API GW stage
# See https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-deployments.html
# Based on https://github.com/ansible-collections/community.aws/blob/main/plugins/modules/aws_api_gateway.py

# TODO needed?
# from __future__ import absolute_import, division, print_function
# __metaclass__ = type

import json
import traceback

try:
    import botocore
except ImportError:
    pass  # Handled by AnsibleAWSModule

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry


def main():
    argument_spec = dict(
        api_id=dict(type='str', required=True),
        stage=dict(type='str', required=True),
        deploy_desc=dict(type='str', required=False, default='')
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    api_id = module.params.get('api_id')
    stage = module.params.get('stage')
    client = module.client('apigateway')

    # Update stage if not in check_mode
    deploy_response = None
    changed = False
    if not module.check_mode:
        try:
            deploy_response = create_deployment(client, api_id, **module.params)
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
            msg = "Updating api {0}, stage {1}".format(api_id, stage)
            module.fail_json_aws(e, msg)
    exit_args = {"changed": changed, "api_deployment_response": deploy_response}
    module.exit_json(**exit_args)


retry_params = {"retries": 10, "delay": 10, "catch_extra_error_codes": ['TooManyRequestsException']}


# @AWSRetry.jittered_backoff(**retry_params)
def create_deployment(client, rest_api_id, **params):
    result = client.create_deployment(
        restApiId=rest_api_id,
        stageName=params.get('stage'),
        description=params.get('deploy_desc')
    )
    return result


if __name__ == '__main__':
    main()
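A hypothetical playbook usage, assuming the module above is saved as apigw_deploy.py in the playbook's library/ directory (the module name comes from the file name; the parameters are from the argument_spec):

- name: Redeploy the API stage
  apigw_deploy:
    api_id: 123454ne
    stage: stage
    deploy_desc: redeploy after resource policy change
  register: deploy_result

Per the exit_args above, deploy_result.api_deployment_response.id should then hold the new deployment's id.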
I'm using Windows 7 and MySQL 8.0. I tried to edit my.ini after stopping the MySQL service. First of all, when I tried to overwrite my.ini with secure_file_priv = "" in it, I got an access-denied error, so I saved the file as 'my1.ini', deleted 'my.ini', and renamed 'my1.ini' back to 'my.ini'. Now when I try to start the MySQL80 service from Administrative Tools > Services, I am unable to start it again. I've also tried this from the CLI client, but it raises the secure_file_priv issue. How do I do it? I've been able to store the scraped data into the MySQL database using Scrapy, but I'm not able to export it to my project directory.
# pipelines.py
from itemadapter import ItemAdapter
import mysql.connector


class QuotewebcrawlerPipeline(object):

    def __init__(self):
        self.create_connection()
        self.create_table()
        # self.dump_database()

    def create_connection(self):
        """
        This method will create the database connection & the cursor object
        """
        self.conn = mysql.connector.connect(host='localhost',
                                            user='root',
                                            passwd='Pxxxx',
                                            database='itemcontainer'
                                            )
        self.cursor = self.conn.cursor()

    def create_table(self):
        self.cursor.execute(""" DROP TABLE IF EXISTS my_table""")
        self.cursor.execute(""" CREATE TABLE my_table (
                            Quote text,
                            Author text,
                            Tag text)"""
                            )

    def process_item(self, item, spider):
        # print(item['quote'])
        self.store_db(item)
        return item

    def store_db(self, item):
        """
        This method is used to write the scraped data from the item container into the database
        """
        self.cursor.execute(""" INSERT INTO my_table VALUES(%s,%s,%s)""",
                            (item['quote'][0], item['author'][0], item['tag'][0])
                            )
        self.conn.commit()
        # self.dump_database()

    # def dump_database(self):
    #     self.cursor.execute("""USE itemcontainer;SELECT * from my_table INTO OUTFILE 'quotes.txt'""",
    #                         multi=True
    #                         )
    #     print("Data saved to output file")
# item_container.py
import scrapy
from ..items import QuotewebcrawlerItem


class ItemContainer(scrapy.Spider):
    name = 'itemcontainer'
    start_urls = [
        "http://quotes.toscrape.com/"
    ]

    def parse(self, response):
        items = QuotewebcrawlerItem()
        all_div_quotes = response.css("div.quote")
        for quotes in all_div_quotes:
            quote = quotes.css(".text::text").extract()
            author = quotes.css(".author::text").extract()
            tag = quotes.css(".tag::text").extract()
            items['quote'] = quote
            items['author'] = author
            items['tag'] = tag
            yield items
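On the export part: SELECT ... INTO OUTFILE is executed by the MySQL server, and while secure_file_priv points at a directory, files can only be written there; making it unrestricted requires secure_file_priv="" under the [mysqld] section of my.ini and a service restart (an access-denied error when saving usually means the editor was not run as administrator). Under that assumption, a minimal sketch of the commented-out dump_database method of QuotewebcrawlerPipeline (the output path is hypothetical; pick any directory the MySQL service account can write to):

    def dump_database(self):
        # Runs server-side; with secure_file_priv = "" any writable
        # absolute path is accepted. The connection already selects the
        # itemcontainer database, so no USE statement (and no multi=True)
        # is needed.
        self.cursor.execute(
            "SELECT * FROM my_table "
            "INTO OUTFILE 'C:/Users/Public/quotes.txt' "
            "FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\r\\n'"
        )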
I need a POST variable with a random number value. How can I generate a random variable in a web scenario? Can I run some script or macro to generate a random value for the scenario or a step?
There is no native way to do it; as you guessed, you can make it work with a macro and a custom script.
You can define a {$RANDOM} host macro and use it in the web scenario step as a post field value (for instance, a post field like nonce={$RANDOM}).
Then you have to change it periodically with a cron-scheduled script; a Python sample:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Set a random macro to a value.

Provide credentials from the command line or from environment variables:
# export ZABBIX_SERVER='https://your_zabbix_host/zabbix/'
# export ZABBIX_USERNAME='admin'
# export ZABBIX_PASSWORD='secretPassword'

$ ./setRandomMacro.py -u admin -p zabbix -Z http://yourzabbix -H yourHost -M '{$RANDOM}'
Connecting to http://yourzabbix
Host yourHost (Id: ----)
{$RANDOM}: current value "17" -> new value "356"

$ ./setRandomMacro.py -u admin -p zabbix -Z http://yourzabbix -H yourHost -M '{$RANDOM}'
Connecting to http://yourzabbix
Host yourHost (Id: ----)
{$RANDOM}: current value "356" -> new value "72"
"""
from zabbix.api import ZabbixAPI
import json
import argparse
import sys
import os
import random


# Class for argparse env variable support
class EnvDefault(argparse.Action):
    # From https://stackoverflow.com/questions/10551117/
    def __init__(self, envvar, required=True, default=None, **kwargs):
        if not default and envvar:
            if envvar in os.environ:
                default = os.environ[envvar]
        if required and default:
            required = False
        super(EnvDefault, self).__init__(default=default, required=required,
                                         **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)


def jsonPrint(jsonUgly):
    print(json.dumps(jsonUgly, indent=4, separators=(',', ': ')))


def ArgumentParser():
    parser = argparse.ArgumentParser()
    parser.add_argument('-Z',
                        required=True,
                        action=EnvDefault,
                        envvar='ZABBIX_SERVER',
                        help="Specify the zabbix server URL ie: http://yourserver/zabbix/ (ZABBIX_SERVER environment variable)",
                        metavar='zabbix-server-url')
    parser.add_argument('-u',
                        required=True,
                        action=EnvDefault,
                        envvar='ZABBIX_USERNAME',
                        help="Specify the zabbix username (ZABBIX_USERNAME environment variable)",
                        metavar='Username')
    parser.add_argument('-p',
                        required=True,
                        action=EnvDefault,
                        envvar='ZABBIX_PASSWORD',
                        help="Specify the zabbix password (ZABBIX_PASSWORD environment variable)",
                        metavar='Password')
    parser.add_argument('-H',
                        required=True,
                        help="Hostname",
                        metavar='hostname')
    parser.add_argument('-M',
                        required=True,
                        help="Macro to set",
                        metavar='macro')
    return parser.parse_args()


def main(argv):
    # Parse arguments and build work variables
    args = ArgumentParser()
    zabbixURL = args.Z
    zabbixUsername = args.u
    zabbixPassword = args.p
    hostName = args.H
    macroName = args.M

    # API Connect
    print('Connecting to {}'.format(zabbixURL))
    zapi = ZabbixAPI(url=zabbixURL, user=zabbixUsername,
                     password=zabbixPassword)
    hostObj = zapi.host.get(search={'host': hostName}, output='hostids')
    print('Host {} (Id: {})'.format(hostName, hostObj[0]['hostid']))
    currentMacro = zapi.usermacro.get(
        hostids=hostObj[0]['hostid'], filter={'macro': macroName})
    if (currentMacro):
        newMacroValue = random.randint(1, 1001)
        print('{}: current value "{}" -> new value "{}"'.format(macroName,
                                                                currentMacro[0]['value'], newMacroValue))
        zapi.usermacro.update(
            hostmacroid=currentMacro[0]['hostmacroid'], value=newMacroValue)
    else:
        print('No {} macro found on host {}'.format(macroName, hostName))


if __name__ == "__main__":
    main(sys.argv[1:])
I cannot get google-drive file-delete() method to work via the Python API.
It is acting broken.
I offer some info about my setup:
Ubuntu 16.04
Python 3.5.2 (default, Nov 12 2018, 13:43:14)
google-api-python-client (1.7.9)
google-auth (1.6.3)
google-auth-httplib2 (0.0.3)
google-auth-oauthlib (0.3.0)
Below, I list a Python script which can reproduce the bug:
"""
googdrive17.py
This script should delete files named 'hello.txt'
Ref:
https://developers.google.com/drive/api/v3/quickstart/python
https://developers.google.com/drive/api/v3/reference/files
Demo (Ubuntu):
sudo apt install python3-pip
sudo pip3 install --upgrade google-api-python-client
sudo pip3 install --upgrade google-auth-httplib2
sudo pip3 install --upgrade google-auth-oauthlib
python3 googdrive17.py
"""
import pickle
import os.path
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# I s.declare a very permissive scope (for training only):
SCOPES = ['https://www.googleapis.com/auth/drive']
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as fh:
creds = pickle.load(fh)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server()
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
# I s.create a file so I can upload it:
with open('/tmp/hello.txt','w') as fh:
fh.write("hello world\n")
# From my laptop, I s.upload a file named hello.txt:
drive_service = build('drive', 'v3', credentials=creds)
file_metadata = {'name': 'hello.txt'}
media = MediaFileUpload('/tmp/hello.txt', mimetype='text/plain')
create_response = drive_service.files().create(body=file_metadata,
media_body=media,
fields='id').execute()
file_id = create_response.get('id')
print('new /tmp/hello.txt file_id:')
print(file_id)
# Q: With googleapiclient, how to filter files list()-response?
# A1: https://developers.google.com/drive/api/v3/reference/files/list
# A2: https://developers.google.com/drive/api/v3/search-files
list_response = drive_service.files().list(
orderBy = "createdTime desc",
q = "name='hello.txt'",
pageSize = 22,
fields = "files(id, name)"
).execute()
items = list_response.get('files', [])
if items:
for item in items:
print('I will try to delete this file:')
print(u'{0} ({1})'.format(item['name'], item['id']))
del_response = drive_service.files().delete(fileId=item['id'])
print('del_response.body:')
print( del_response.body)
print('I will try to emptyTrash:')
trash_response = drive_service.files().emptyTrash()
print('trash_response.body:')
print( trash_response.body)
else:
print('hello.txt not found in your google-drive account.')
When I run the script I see output similar to that listed below:
$ python3 googdrive17.py
new /tmp/hello.txt file_id:
1m8nKOfIeB0E5t60F_-9bKwIJds8PSvYY
I will try to delete this file:
hello.txt (1m8nKOfIeB0E5t60F_-9bKwIJds8PSvYY)
del_response.body:
None
I will try to delete this file:
hello.txt (1Ow4fcUBgEYUy3ezYScDKlLSMbp-hyOLT)
del_response.body:
None
I will try to delete this file:
hello.txt (1TiUrLgQdY1Cb9w0UWHjnmj7HZBaFsKcp)
del_response.body:
None
I will try to emptyTrash:
trash_response.body:
None
$
I see that two of the API calls work well:
files.list()
files.create()
Two calls appear broken:
files.delete()
files.emptyTrash()
Perhaps, though, I call them incorrectly?
How about this modification?
First, the official documentation of the Files: delete method and the Files: emptyTrash method says the following.
If successful, this method returns an empty response body.
Accordingly, when the file has been deleted and the trash has been emptied, the returned del_response and trash_response are empty.
Modified script:
From your question, I understand that files.list() and files.create() work, so I would like to propose modifications for files.delete() and files.emptyTrash(). Please modify your script as follows.
From:
for item in items:
    print('I will try to delete this file:')
    print(u'{0} ({1})'.format(item['name'], item['id']))
    del_response = drive_service.files().delete(fileId=item['id'])
    print('del_response.body:')
    print(del_response.body)
print('I will try to emptyTrash:')
trash_response = drive_service.files().emptyTrash()
print('trash_response.body:')
print(trash_response.body)
To:
for item in items:
    print('I will try to delete this file:')
    print(u'{0} ({1})'.format(item['name'], item['id']))
    del_response = drive_service.files().delete(fileId=item['id']).execute()  # Modified
    print('del_response.body:')
    print(del_response)
print('I will try to emptyTrash:')
trash_response = drive_service.files().emptyTrash().execute()  # Modified
print('trash_response.body:')
print(trash_response)
execute() was added for drive_service.files().delete() and drive_service.files().emptyTrash().
References:
Files: delete
Files: emptyTrash
If this was not the result you want, I apologize.
The following code fails to connect to a Cisco switch because of the RSA key confirmation prompt:
RSA key fingerprint is 3e:b7:7b:55:6b:a3:xx:xx:xx:xx
Are you sure you want to continue connecting (yes/no)? yes
#!/usr/bin/env python
from __future__ import print_function
from netmiko import ConnectHandler
import sys
import time
import select
import paramiko
import re

fd = open(r'output_twinax.log', 'w')  # Where you want the file to save to.
old_stdout = sys.stdout
sys.stdout = fd

platform = 'cisco_ios'
username = 'username'  # edit to reflect
password = 'password'  # edit to reflect
# a simple list of the IP addresses you want to connect to, one per line
ip_add_file = open(r'IP-list', 'r')

for host in ip_add_file:
    host = host.strip()
    device = ConnectHandler(device_type=platform, ip=host, username=username, password=password)
    find_hostname = device.find_prompt()
    hostname = find_hostname.replace(">", "")
    print(hostname)
    output = device.send_command('terminal length 0')
    output = device.send_command('enable')  # Editable to be whatever is needed
    output = device.send_command('sh int status | i SFP')
    print(output)
fd.close()
Please help me modify it to account for the RSA key. Thank you very much.
Did you try the use_keys keyword argument?
#!/usr/bin/env python
from __future__ import print_function
from netmiko import ConnectHandler
import sys
import time
import select
import paramiko
import re

fd = open(r'output_twinax.log', 'w')  # Where you want the file to save to.
old_stdout = sys.stdout
sys.stdout = fd

platform = 'cisco_ios'
username = 'username'  # edit to reflect
password = 'password'  # edit to reflect

# List of IP addresses, one per line
ip_add_file = open(r'IP-list', 'r')
key_file = "./rsa_key.txt"

for host in ip_add_file:
    host = host.strip()
    device = ConnectHandler(device_type=platform,
                            ip=host,
                            username=username,
                            key_file=key_file,
                            use_keys=True)
    find_hostname = device.find_prompt()
    hostname = find_hostname.replace(">", "")
    print(hostname)
    output = device.send_command('terminal length 0')
    output = device.send_command('enable')
    output = device.send_command('sh int status | i SFP')
    print(output)
fd.close()
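As a side note, the prompt in the question is SSH host-key checking, which is separate from the key-based authentication that use_keys controls. To my knowledge, netmiko accepts unknown host keys automatically unless ssh_strict=True is passed, so a hedged sketch that keeps password login and makes this explicit would be:

# Hedged sketch: ssh_strict=False (netmiko's default, as far as I know)
# auto-accepts unknown host keys, so the interactive RSA-fingerprint
# prompt seen with plain ssh should not appear.
device = ConnectHandler(device_type=platform,
                        ip=host,
                        username=username,
                        password=password,
                        ssh_strict=False)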