I am using the code below. I first add hc as a host, and after starting the network I run a simple switch application in hc to make it act as the controller. The problem is that there is no reachability in the network, i.e. ping does not work. Could anyone please let me know what is causing this behaviour?
def myNetwork():
    # Create an instance of the Mininet class, i.e. the network with default values
    net = Mininet()
    info( '*** Adding controller\n' )
    #c0 = net.addController('c0')
    hc = net.addHost( 'hc', ip='127.0.0.1' )
    info( '*** Adding switches\n')
    s1 = net.addSwitch('s1')
    s5 = net.addSwitch('s5')
    s2 = net.addSwitch('s2')
    info( '*** Adding links\n')
    net.addLink(hc, s1)
    net.addLink(s1, s5, cls=TCLink)
    net.addLink(s5, s2, cls=TCLink)
    hosts = list()
    # add all remaining hosts to s2
    info( '*** Adding hosts and Links\n')
    for i in range(1, 11):
        name = 'h' + str(i)
        host = net.addHost(name)
        net.addLink(s2, host, cls=TCLink)
        hosts.append(host)
    info( '*** Starting network\n')
    net.start()
    hc.cmdPrint('ryu-manager ryu/simple_switch_13.py \
        --verbose 1> tmp/controller-ryu.log 2>&1 &')
    # Start the Mininet CLI to run commands
    CLI(net)
    # Stop the network
    net.stop()

if __name__ == '__main__':
    setLogLevel( 'info' )
    myNetwork()
A lot of parts are missing in the code, and the indentation was not correct.
hc is not needed to create your remote controller. You can use system("START CONTROLLER COMMAND") to run a remote controller like Ryu (though I suggest running that command outside your script). Then use net.addController with cls=RemoteController.
def myNetwork():
    # Create an instance of the Mininet class, i.e. the network with default values
    net = Mininet(controller=RemoteController)
    #info( '*** Adding controller\n' )
    c0 = net.addController('c0', cls=RemoteController)
    #hc = net.addHost( 'hc', ip='127.0.0.1' )
    info( '*** Adding switches\n')
    s1 = net.addSwitch('s1')
    s5 = net.addSwitch('s5')
    s2 = net.addSwitch('s2')
    info( '*** Adding links\n')
    #net.addLink(hc, s1)
    net.addLink(s1, s5, cls=TCLink)
    net.addLink(s5, s2, cls=TCLink)
    hosts = list()
    # add all remaining hosts to s2
    info( '*** Adding hosts and Links\n')
    for i in range(1, 11):
        name = 'h' + str(i)
        host = net.addHost(name)
        net.addLink(s2, host, cls=TCLink)
        hosts.append(host)
    info( '*** Starting network\n')
    net.start()
    #hc.cmdPrint('ryu-manager ryu/simple_switch_13.py \
    #    --verbose 1> tmp/controller-ryu.log 2>&1 &')
    # Start the Mininet CLI to run commands
    CLI(net)
    # Stop the network
    net.stop()

if __name__ == '__main__':
    from mininet.log import setLogLevel, info
    from mininet.net import Mininet
    from mininet.link import TCLink
    from mininet.cli import CLI
    from mininet.node import RemoteController
    setLogLevel( 'info' )
    myNetwork()
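A note on workflow (this is an assumption about the usual Ryu/Mininet setup, not something stated in the answer): start the controller in a separate terminal first, e.g. ryu-manager ryu.app.simple_switch_13, and only then run the script, so the switches find a listening controller when net.start() runs. If the controller lives on another host or port, the addController call can point at it explicitly; a minimal sketch with placeholder values:
c0 = net.addController('c0', cls=RemoteController, ip='127.0.0.1', port=6653)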
For my application, a new file uploaded to storage is read and its data is appended to a main file. The new file contains 2 lines: a header and an array whose values are separated by commas. The main file will be at most 265 MB; the new files will be at most 30 MB.
def write_append_to_ecg_file(filename, ecg, patientdata):
    file1 = open('/tmp/' + filename, "w+")
    file1.write(":".join(patientdata))
    file1.write('\n')
    file1.write(",".join(ecg.astype(str)))
    file1.close()

def storage_trigger_function(data, context):
    # Download the segment file
    download_files_storage(bucket_name, new_file_name, storage_folder_name=blob_path)
    # Read the segment file
    data_from_new_file, meta = read_new_file(new_file_name, scale=1, fs=125, include_meta=True)
    print("Length of ECG data from segment {} file {}".format(segment_no, len(data_from_new_file)))
    os.remove(new_file_name)
    # Check if the main ecg file exists
    file_exists = blob_exists(bucket_name, blob_with_the_main_file)
    print("File status {}".format(file_exists))
    data_from_main_file = []
    if file_exists:
        download_files_storage(bucket_name, main_file_name, storage_folder_name=blob_with_the_main_file)
        data_from_main_file, meta = read_new_file(main_file_name, scale=1, fs=125, include_meta=True)
        print("ECG data from main file {}".format(len(data_from_main_file)))
        os.remove(main_file_name)
        data_from_main_file = np.append(data_from_main_file, data_from_new_file)
        print("data after appending {}".format(len(data_from_main_file)))
        write_append_to_ecg_file(main_file, data_from_main_file, meta)
        token = upload_files_storage(bucket_name, main_file, storage_folder_name=main_file_blob, upload_file=True)
    else:
        write_append_to_ecg_file(main_file, data_from_new_file, meta)
        token = upload_files_storage(bucket_name, main_file, storage_folder_name=main_file_blob, upload_file=True)
The GCF is deployed
gcloud functions deploy storage_trigger_function --runtime python37 --trigger-resource patch-us.appspot.com --trigger-event google.storage.object.finalize --timeout 540s --memory 8192MB
For the first file, I was able to read the file and write the data to the main file. But after uploading the 2nd file, it gives Function execution took 70448 ms, finished with status: 'connection error'. On uploading the 3rd file, it gives Function invocation was interrupted. Error: memory limit exceeded. Despite deploying the function with 8192 MB of memory, I am getting this error. Can I get some help on this?
I have this code for connecting to MySQL through an SSH tunnel, inside a Python class:
def executeQuery(self, query_string):
    print("connecting to database " + self.sql_main_database)
    with SSHTunnelForwarder(
            (self.ssh_host, self.ssh_port),
            ssh_username=self.ssh_user,
            ssh_pkey=self.pkey,
            remote_bind_address=(self.sql_hostname, self.sql_port)
    ) as tunnel:
        print("performing connection")
        conn = pymysql.connect(
            host="127.0.0.1",
            user=self.sql_username,
            password=self.sql_password,
            db=self.sql_main_database,
            port=tunnel.local_bind_port)
        query = query_string
        print("Querying")
        data = pd.read_sql_query(query, conn)
        print("Done!")
        conn.close()
        return data
The code works well, but when the query is not well formed, the notebook freezes.
I then tried to use a try/except, and the code ended up like this:
def executeQuery(self, query_string):
    try:
        with SSHTunnelForwarder(
                (self.ssh_host, self.ssh_port),
                ssh_username=self.ssh_user,
                ssh_pkey=self.pkey,
                remote_bind_address=(self.sql_hostname, self.sql_port)
        ) as tunnel:
            try:
                conn = pymysql.connect(
                    host="127.0.0.1",
                    user=self.sql_username,
                    password=self.sql_password,
                    db=self.sql_main_database,
                    port=tunnel.local_bind_port
                )
                try:
                    query = query_string
                    data = pd.read_sql_query(query, conn)
                    return data
                except DatabaseError as e:
                    Log.debug(self, str(e))
                    raise DatabaseError
            except pymysql.err.InternalError as e:
                Log.debug(self, str(e))
                raise DataError
    except Exception as e:
        Log.debug(self, "[Error] Setting up database: '" + self.sql_main_database + "'")
        raise DataError
The issue is that pd.read_sql_query never returns, so the except is never reached; the try won't fail and the process just continues forever.
The timeout workaround is not possible, because the queries don't have defined execution times and some of them can stay processing for a couple of hours.
I'm not sure how to fix it.
Indeed, the problem was not with the connector; updating the Jupyter version was all that was needed.
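(For what it's worth, that typically amounts to something like pip install --upgrade notebook in the environment running the notebook, followed by restarting the kernel.)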
I am using the python-gnupg package to create a GPG public and private key. I am storing the generated private key in AWS Secrets Manager as follows.
Key: private_key
Value: -----BEGIN PGP PRIVATE KEY BLOCK-----
Version: GnuPG v2.0.22 (GNU/Linux)
lQO+BF37qDIBCADXq0iJVRYFb43+YU8Ts63hDgZl49ZNdnDVhd9H0JMXRHqtPqt9
bbFePPN47NRe6z6GsbPaPmDqEE9l3KjFnSZB/yCii+2wHZR0ij2g3ATbiAbOoQQy
I6bbUADmHtcfIJByoXVoDk489nUPt84Xyp1lHiBfCtUmq4w62Okq6InlRhxjxcEx
VvSXaCY8YnEXUAgNGpvcKHDejGS9V4djh7r7lgJ/Y+3Xb2eepOfiaCx2Cn8ZMI0q
7eWH0MmSeR4ueOLeb79ZKjpJraBfV91XplgHHiM18oECWWwsQCFiwi1GVOLpX6Fh
HIoUyaRAW2vZyFcNnO7iLbetie6fE884lfHxABEBAAH+AwMCO+Qoh7o3GWVga9f2
gHEeuGGH4KB3aspQZt/zwKpx7YlDB/uLd4q7JQt35nIH6pfYdMwgQt001CRhsGvX
QVKIkvipQvJZgCO8Nix7xYCukH0cI4TXD7S9BmRNMCPi74+Q1J3cDfKHCseynMNF
GzBeCDx6LW3CVfKKs0Mc2ecSl0c8gxaPDi3AfACRMefEAuVQyo82qJKjpI+O/Yik
z40C5OgK0XfetKstxcH4B0bx0o/PrUpYFM/gHHgFkbpVg5citcvFY4VcEkWryVcg
yF0qBPXP0OKBtCUU1ZGiCwRJy8iGd/dOOICcSCfMNy+jzzM3FSVzei69x7MYt3xu
IzCsmHpDvpdL7tiDDHgwajZeFFPTzf7Ic90K6TapQ3H59xPMxnL9K5o9rP1glRY0
8e4zYjYxg9A6Yl3K5zdqs+M1A3Os70HUlWZXZ4LQNcidPd1rhnPnm9eXkyV2ScXl
dE38aOA5pnrL0WZUM3/OLAToMP6h4rjw9WLqqgWlrl6yz9bhZrfRxlhZaEtNs1bi
pgrmPK/a5fK++BjMSuA94EkXTVNjKWNQBzcmrff27M1TMwN+34NWj3dk/a1gyflP
QZgK3MT+0GaMCcvy1EoZ87ffLQrWwFJOw5nT83yG7VBbuerSEk/tk30bxmYN6HzO
zvQgSjDiiH+ANXVupnzDjjBREmH6V1Hv+7Q0vrjKQHd3eYvKJpAWfFr9kO8DzKck
ZkSMj487SjlHbh33z1yupuwAtjyYQ5tN1adSlDa92t0Q08udnFDQtxXEnL6rw/Du
llEuCEVC9UYcNwwQGMsGXQBFFfj1389WHr0hkSOvyS1nPiIku5kNXDhSWq7/okTS
FwnCt+wbZa6TWbXjwKzHzu4LOarV1s8DnYHKNH6HHIqsVR2oJuIuqhyREAqjeP/T
3bQjQXV0b2dlbmVyYXRlZCBLZXkgPG1laHVsQHBoZWFhLm9yZz6JATkEEwECACMF
Al37qDICGy8HCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAAKCRDO+i9CZ70SvqMn
CACCmdzqZW68j1E45XTHz3fvqdft6fXOyrlMuDdcH2y7Zrl5JS7PlCeHzIcsSMlH
wDYpCG8km7nwZsnWqKsOXFWq1nq/j7Kv5AzR7UmPzTw/1HFSVhIFA0ZZMHAnwp7Y
bcAT+ssvo4To9CjzRp/ZI1k26RFXPWuXETa41DBIVz13Ss4SIaf7UG9FQ55o+2BA
TP48yCQqktiWOoZ0rV1ALSFlE4Gs3UWHcYxxCABA0JB4+FuCRfB8QMreLwFb47wc
dIitbVl0mQx5IXCkqhJKqR62rRy25Put4xnPhXGtXqfoYDVYvYvlsl/FA35cX+Z1
QODnLq/jQ7ZPdaFC7cFqxztk
=RvGa
-----END PGP PRIVATE KEY BLOCK-----
Key: passphrase
Value: secret123
All I want to do is extract the key/value pairs from AWS Secrets Manager, import the key, and later decrypt a file.
As you all know, JSON doesn't interpret new-line characters in a multi-line value, so GPG import_keys fails to import the private key. If I just read a local file containing the same private key, there is no problem. Please let me know if there is any workaround for this issue.
try:
    secretkey = self.get_secret(secretName)
    if not secretkey:
        self.logger.error("Empty secret key")
        exit(0)
    newdict = json.loads(secretkey)
    # newdict = ast.literal_eval(secretkey)
    private_key = newdict['private_key']
    # private_key = open('/home/ec2-user/GPG/test_private_key.asc').read()
    passphrase = newdict['passphrase']
    gpg = gnupg.GPG(gnupghome=gpgHomeDir)
    import_result = gpg.import_keys(private_key)
    count = import_result.count
    if count == 0:
        self.logger.error("Failed to import private key")
        sys.exit(1)
    dataPath = srcDir + "/" + self.dataSource
    for root, folders, files in os.walk(dataPath):
        if not files:
            self.logger.info("No files found so skipping .....")
            continue
        for filename in folders + files:
            fullpath = os.path.join(root, filename)
            self.logger.info("Fullpath = {0}".format(fullpath))
            out_file = "/tmp/" + filename
            with open(fullpath, "rb") as f:
                status = gpg.decrypt_file(f, passphrase=passphrase, output=out_file)
            if status.ok:
                s3Prefix = root.replace(srcDir + '/', '')
                s3ObjKey = s3Prefix + "/" + filename
                s3InPath = "s3://" + self.inBucketName + "/" + s3Prefix + "/" + filename
                with open(out_file, "rb") as fl:
                    self.s3Client.upload_fileobj(fl,
                                                 self.inBucketName,
                                                 s3ObjKey)
except Exception as e:
    print(str(e))
    self.logger.error(str(e))
    sys.exit(1)
I have to use base64 format to store the PGP key, as follows.
import base64
import sys

import gnupg

try:
    gpg = gnupg.GPG(gnupghome="/home/guest/GPG")
    input_data = gpg.gen_key_input(key_type='RSA',
                                   key_length=2048,
                                   name_email="guest@xyz.com",
                                   passphrase="pass123")
    key = gpg.gen_key(input_data)
    ascii_armored_public_key = gpg.export_keys(key.fingerprint, armor=True)
    ascii_armored_private_key = gpg.export_keys(key.fingerprint, True, armor=True)
    b64_encoded_private_key = base64.b64encode(ascii_armored_private_key.encode())
    binaryPrivKeyFile = "/tmp/b64encoded_private_key.asc"
    with open(binaryPrivKeyFile, 'wb') as bPrivFile:
        bPrivFile.write(b64_encoded_private_key)
except Exception as e:
    print(str(e))
    sys.exit(1)
Now we store b64encoded_private_key.asc in AWS Secrets Manager as follows.
$ aws secretsmanager create-secret --name private-key --secret-binary fileb://b64encoded_private_key.asc --region us-east-1
We cannot store the passphrase in the same secret, so we create a separate secret for it as follows.
$ aws secretsmanager create-secret --name passwd --secret-string '{"passphrase" : "pass123"}' --region us-east-1
NOTE: The secret type for the private key is binary, whereas for the passphrase it is plain text.
After creating the secrets, we can use the AWS Secrets Manager retrieval code to get the private key and passphrase. That code decodes the private key using the base64.b64decode(..) method.
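The post doesn't show that retrieval code; here is a minimal sketch of what it could look like with boto3, assuming the secret names and region from the create-secret commands above (boto3 hands back the stored bytes in SecretBinary, and those bytes are the base64-encoded key we uploaded, so base64.b64decode is still needed before the import):
import base64
import json

import boto3
import gnupg

client = boto3.client("secretsmanager", region_name="us-east-1")

# Binary secret: the stored bytes are the base64-encoded armored key.
priv_resp = client.get_secret_value(SecretId="private-key")
private_key = base64.b64decode(priv_resp["SecretBinary"]).decode()

# Plain-text secret holding the passphrase as JSON.
pass_resp = client.get_secret_value(SecretId="passwd")
passphrase = json.loads(pass_resp["SecretString"])["passphrase"]

gpg = gnupg.GPG(gnupghome="/home/guest/GPG")
import_result = gpg.import_keys(private_key)
assert import_result.count > 0, "private key import failed"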
Secrets Manager does not require you to store the data in JSON format; it can store arbitrary strings or binary data.
You could either choose to break everything up and store it in separate secrets, or use a data format that supports new lines, such as XML.
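As a hedged illustration of the first option (a plain string secret with no JSON wrapping, so the newlines survive; the secret name here is a placeholder and the file path is the one from the question):
import boto3

client = boto3.client("secretsmanager", region_name="us-east-1")

# Store the armored key as-is in its own secret; SecretString accepts
# multi-line text, so no escaping or base64 step is needed.
with open("/home/ec2-user/GPG/test_private_key.asc") as f:
    armored_key = f.read()
client.create_secret(Name="pgp-private-key", SecretString=armored_key)

# Read it back; the value round-trips with its newlines intact.
key_back = client.get_secret_value(SecretId="pgp-private-key")["SecretString"]
assert key_back == armored_key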
The private key you store this way won't keep special characters like '\n' and '\r' as real newlines.
To resolve this issue, copy the output of private_key (its repr), which shows those characters escaped:
private_key = open('/home/ec2-user/GPG/test_private_key.asc').read()
private_key
Place this private key into your secret and fetch it with get_secret().
Note: you will see additional '\' characters in the private key you get back from json.loads; to handle that you need to use private_key.replace('\\n', '\n').
Your code will look like below.
private_key = newdict['private_key']
private_key = private_key.replace('\\n', '\n')
Then you will be able to get the keys.
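To make the escaping concrete, here is a small self-contained sketch of the round trip (the key text is a made-up placeholder):
import json

# A single-line JSON secret: the pasted repr output stores newlines as the
# two characters backslash + n, which json.loads hands back verbatim.
secret_string = r'{"private_key": "-----BEGIN PGP PRIVATE KEY BLOCK-----\\nlQO+BF37...\\n-----END PGP PRIVATE KEY BLOCK-----"}'
loaded = json.loads(secret_string)["private_key"]

fixed = loaded.replace("\\n", "\n")   # turn literal \n back into real newlines
print(fixed)                          # prints a proper multi-line armored key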
This question is about making any nnGraph network run on multiple GPUs, and is not specific to the following network instance.
I am trying to train a network constructed with nnGraph; the backward diagram is attached. I am trying to run parallelModel (see the code, or Node 9 in the figure) in a multi-GPU setting. If I attach the parallel model to an nn.Sequential container and then create a DataParallelTable, it works in a multi-GPU setting (without nnGraph). However, after attaching it to nnGraph I get an error. The backward pass works if I train on a single GPU (setting true to false in the if statements), but in a multi-GPU setting I get the error "gmodule.lua:418: attempt to index local 'gradInput' (a nil value)". I think Node 9 in the backward pass should run on multiple GPUs, but that is not happening. Creating a DataParallelTable around the whole nnGraph module didn't work for me, but I thought at least putting the internal Sequential networks in a DataParallelTable would work. Is there some other way to split the initial data passed to nnGraph so that it runs on multiple GPUs?
require 'torch'
require 'nn'
require 'cudnn'
require 'cunn'
require 'cutorch'
require 'nngraph'

data1 = torch.ones(4,20):cuda()
data2 = torch.ones(4,10):cuda()

tmodel = nn.Sequential()
tmodel:add(nn.Linear(20,10))
tmodel:add(nn.Linear(10,10))

parallelModel = nn.ParallelTable()
parallelModel:add(tmodel)
parallelModel:add(nn.Identity())
parallelModel:add(nn.Identity())

model = parallelModel

if true then
   local function sharingKey(m)
      local key = torch.type(m)
      if m.__shareGradInputKey then
         key = key .. ':' .. m.__shareGradInputKey
      end
      return key
   end

   -- Share gradInput for memory efficient backprop
   local cache = {}
   model:apply(function(m)
      local moduleType = torch.type(m)
      if torch.isTensor(m.gradInput) and moduleType ~= 'nn.ConcatTable' then
         local key = sharingKey(m)
         if cache[key] == nil then
            cache[key] = torch.CudaStorage(1)
         end
         m.gradInput = torch.CudaTensor(cache[key], 1, 0)
      end
   end)
end

if true then
   cudnn.fastest = true
   cudnn.benchmark = true

   -- Wrap the model with DataParallelTable, if using more than one GPU
   local gpus = torch.range(1, 2):totable()
   local fastest, benchmark = cudnn.fastest, cudnn.benchmark
   local dpt = nn.DataParallelTable(1, true, true)
      :add(model, gpus)
      :threads(function()
         local cudnn = require 'cudnn'
         cudnn.fastest, cudnn.benchmark = fastest, benchmark
      end)
   dpt.gradInput = nil
   model = dpt:cuda()
end

newmodel = nn.Sequential()
newmodel:add(model)

input1 = nn.Identity()()
input2 = nn.Identity()()
input3 = nn.Identity()()

out = newmodel({input1, input2, input3})

r1 = nn.NarrowTable(1,2)(out)
r2 = nn.NarrowTable(2,2)(out)

f1 = nn.JoinTable(2)(r1)
f2 = nn.JoinTable(2)(r2)

n1 = nn.Sequential()
n1:add(nn.Linear(20,5))
n2 = nn.Sequential()
n2:add(nn.Linear(20,5))

f11 = n1(f1)
f12 = n2(f2)

foutput = nn.JoinTable(2)({f11, f12})

g = nn.gModule({input1, input2, input3}, {foutput})
g = g:cuda()

g:forward({data1, data2, data2})
g:backward({data1, data2, data2}, torch.rand(4,10):cuda())
The code in the "if" statements is taken from Facebook's ResNet implementation
I want to learn Yii as my first framework, and I'm trying to make the contact form work. But I got this error:
I've already configured the php.ini file at:
C:\wamp\bin\php\php5.3.0
And changed the defaults to these values:
[mail function]
; For Win32 only.
; http://php.net/smtp
SMTP = ssl:smtp.gmail.com
; http://php.net/smtp-port
smtp_port = 23
; For Win32 only.
; http://php.net/sendmail-from
sendmail_from = myemail@gmail.com
I've seen from here that Gmail doesn't use port 25, which is the default in php.ini, so I used 23 and also opened that port in the Windows 7 firewall via inbound rules.
Then I also edited the main config in my yii application, to match the email that I'm using:
// application-level parameters that can be accessed
// using Yii::app()->params['paramName']
'params'=>array(
    // this is used in contact page
    'adminEmail'=>'myemail@gmail.com',
),
);
Finally, I restarted WampServer and then cleared all my browsing data. Why do I still see it pointing to port 25 in the error? Have I missed something? Please help.
Here's a simple Python script which allows you to run a mail server on localhost; you don't have to change anything. Sorry if I'm a bit late.
import smtpd
import smtplib
import asyncore

class SMTPServer(smtpd.SMTPServer):
    def __init__(*args, **kwargs):
        print "Running fake smtp server on port 25"
        smtpd.SMTPServer.__init__(*args, **kwargs)

    def process_message(*args, **kwargs):
        to = args[3][0]
        msg = args[4]
        gmail_user = 'yourgmailhere'
        gmail_pwd = 'yourgmailpassword'
        smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
        smtpserver.ehlo()
        smtpserver.starttls()
        smtpserver.ehlo()
        smtpserver.login(gmail_user, gmail_pwd)
        smtpserver.sendmail(gmail_user, to, msg)
        print 'sent to ' + to
        pass

if __name__ == "__main__":
    smtp_server = SMTPServer(('localhost', 25), None)
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        smtp_server.close()
# end of code
Note: I used args[3][0] and args[4] as the to-address and message, because the args sent by my PHP mail() mapped the recipient email to args[3][0].
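For context, smtpd.SMTPServer.process_message is invoked as process_message(self, peer, mailfrom, rcpttos, data), so with the *args style used above args[3] is the recipient list and args[4] is the raw message.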
If you open the php.ini file in WAMP, you will find these two lines:
smtp_server
smtp_port
Add the server and port number for your host (you may need to contact them for details)
The following two lines don't exist by default:
auth_username
auth_password
So you will need to add them to be able to send mail from a server that requires authentication. So an example may be:
smtp_server = mail.example.com
smtp_port = 25
auth_username = example_username@example.com
auth_password = example_password
PS: you should not use your personal mail here, for obvious reasons.
If using WAMP, the php.ini to be configured is in the wamp/bin/apache/Apache_x_y/bin folder,
where _x_y corresponds to the version of the Apache build used by your WAMP installation.
Uncomment extension=php_openssl.dll in php.ini on the WAMP server ("D:\wamp\bin\apache\Apache2.4.4\bin\php.ini").
Then, in the file "D:\wamp\www\mantisbt-1.2.15\config_inc.php":
# --- Email Configuration ---
$g_phpMailer_method = PHPMAILER_METHOD_SMTP;
$g_smtp_host = 'smtp.gmail.com';
$g_smtp_connection_mode = 'ssl';
$g_smtp_port = 465;
$g_smtp_username = 'yourmail@gmail.com';
$g_smtp_password = 'yourpwd';
$g_enable_email_notification = ON;
$g_log_level = LOG_EMAIL | LOG_EMAIL_RECIPIENT;
$g_log_destination = 'file:/tmp/log/mantisbt.log';
$g_administrator_email = 'administrator@example.com';
$g_webmaster_email = 'webmaster@example.com';
$g_from_email = 'noreply@example.com';
$g_return_path_email = 'admin@example.com';
$g_from_name = 'Mantis Bug Tracker';
$g_email_receive_own = OFF;
$g_email_send_using_cronjob = OFF;