How can I use both compilers in one SConscript? Some of the details of the SConscript are shown below:
I have a .cu file and with nvcc -c test.cu, I could generate a .o file. How can I import this .o file to the existing sconscript, so that the .o file can be read?
I'm also trying to use both compilers in one SConscript file. I just couldn't figure out how to execute gcc and nvcc in one SConscript.
Import('env')
env = env.Clone()
env['CC'] = ['gcc']
env['CXX'] = ['g++-6']
env['CPPPATH'] = []
env['CCFLAGS'] = []
env['LIBPATH'] = []
env['LIBS'] = []
ldflags = []
env['CCFLAGS'] += ['-std=c++11']

import os

# Collect every .cpp under ./src (one Glob per sub-directory).
src_files = []
for root, dirs, files in os.walk("./src", topdown=False):
    for dir in dirs:
        src_files += Glob("%s/*.cpp" % os.path.join(root, dir))

# Mirror the src tree into the per-configuration object directory.
obj_dir = 'build/%s/%s' % (build_dir, second_build_dir)
objs = []
for src_file in src_files:
    target_file = str(src_file).replace('src', obj_dir, 1)
    # BUG FIX: only swap the *extension*.  The original
    # replace('cpp', 'o') rewrote every occurrence of "cpp" in the
    # path, corrupting any directory or file name containing "cpp".
    target_file = os.path.splitext(target_file)[0] + '.o'
    objs += env.Object(target_file, src_file)

# build targets
post_fix = 'st' if env['single_thread'] else 'mt'
for main_cpp in Glob('./target/*cpp'):
    name = main_cpp.name[:-4]
    main_obj = './%s/target/%s.o' % (obj_dir, name)
    main_bin = './build/%s/%s_%s' % (build_dir, name, post_fix)
    env.Object(main_obj, main_cpp)
    env.Program(main_bin, objs + [main_obj])
If you want to use two compilers in the same SConscript, you should have an Environment() for each, and use where appropriate.
Related
import pandas as pd

# Stream the large reviews file in 1M-row chunks rather than loading it whole.
df_reader = pd.read_json('Clothing_Shoes_and_Jewelry.json', lines = True ,chunksize = 1000000 )
counter = 1
for chunk in df_reader:
    new_df = pd.DataFrame(chunk[['overall', 'reviewText','summary']])
    # BUG FIX: the original wrote new_df['overall' == 1], which evaluates
    # 'overall' == 1 to the constant False and then indexes the frame with
    # it (a KeyError) instead of building a boolean mask.  The comparison
    # must be applied to the column: new_df['overall'] == 1.
    new_df1 = new_df[new_df['overall'] == 1].sample(4000)
    new_df2 = new_df[new_df['overall'] == 2].sample(4000)
    new_df3 = new_df[new_df['overall'] == 4].sample(4000)
    new_df4 = new_df[new_df['overall'] == 5].sample(4000)
    new_df5 = new_df[new_df['overall'] == 3].sample(8000)
    # One balanced sample per chunk: 4k of each polar rating, 8k neutral.
    new_df6 = pd.concat([new_df1, new_df2, new_df3, new_df4, new_df5], axis = 0,ignore_index = True)
    new_df6.to_csv(str(counter)+'.csv', index = False)
    counter = counter+1
from glob import glob
# glob retrieves all pathnames matching a pattern — here every chunk CSV
# written above ('1.csv', '2.csv', ..., '33.csv').
filenames = glob('*.csv')

# Load each chunk CSV and stack them into a single frame.
dataframes = [pd.read_csv(name) for name in filenames]
finaldf = pd.concat(dataframes, axis = 0, ignore_index = True)
finaldf.to_csv("balanced_reviews.csv", index = False)
#---------------------------------
df = pd.read_csv('balanced_reviews.csv')
I get a ValueError: Expected object or value when getting a chunk from df_reader
The error usually occurs when either the file is not referenced correctly or your JSON itself is malformed. Like @Cimbali mentioned above — if you are allowed to copy a sample of the JSON, that would help additionally. Meanwhile, check these answers from a related question asked earlier on Stack Overflow itself: [(ValueError: Expected object or value when reading json as pandas dataframe)]
As I'm new and learning python, exploring different ways to build a config file for python based framework.
I have come across using-built-in-data-structure-complicated-py , couldn't understand main.py . Could you help me with how main.py should look like and how the variables from config.py can be accessed in main.py.
# config.py

class Config:
    """Base settings shared by every environment."""
    APP_NAME = 'myapp'
    SECRET_KEY = 'secret-key-of-myapp'
    ADMIN_NAME = 'administrator'
    AWS_DEFAULT_REGION = 'ap-northeast-2'
    STATIC_PREFIX_PATH = 'static'
    ALLOWED_IMAGE_FORMATS = ['jpg', 'jpeg', 'png', 'gif']
    MAX_IMAGE_SIZE = 5242880  # 5MB


class DevelopmentConfig(Config):
    """Overrides for local development."""
    DEBUG = True
    AWS_ACCESS_KEY_ID = 'aws-access-key-for-dev'
    # NOTE(review): attribute name keeps the original spelling ("SECERT") so
    # existing callers do not break; consider a coordinated rename later.
    AWS_SECERT_ACCESS_KEY = 'aws-secret-access-key-for-dev'
    AWS_S3_BUCKET_NAME = 'aws-s3-bucket-name-for-dev'
    DATABASE_URI = 'database-uri-for-dev'


class TestConfig(Config):
    """Overrides for the test environment."""
    DEBUG = True
    TESTING = True
    AWS_ACCESS_KEY_ID = 'aws-access-key-for-test'
    AWS_SECERT_ACCESS_KEY = 'aws-secret-access-key-for-test'
    AWS_S3_BUCKET_NAME = 'aws-s3-bucket-name-for-test'
    # BUG FIX: was 'database-uri-for-dev' (copy-paste) — the test
    # environment must not point at the development database.
    DATABASE_URI = 'database-uri-for-test'


class ProductionConfig(Config):
    """Overrides for production."""
    DEBUG = False
    AWS_ACCESS_KEY_ID = 'aws-access-key-for-prod'
    AWS_SECERT_ACCESS_KEY = 'aws-secret-access-key-for-prod'
    AWS_S3_BUCKET_NAME = 'aws-s3-bucket-name-for-prod'
    # BUG FIX: was 'database-uri-for-dev' (copy-paste).
    DATABASE_URI = 'database-uri-for-prod'


class CIConfig:
    """Continuous-integration service settings (not an app environment,
    so it deliberately does not inherit from Config)."""
    SERVICE = 'travis-ci'
    HOOK_URL = 'web-hooking-url-from-ci-service'
# main.py
import sys
import config
...

if __name__ == '__main__':
    # BUG FIX: the original tested len(sys.argv) > 2, so
    # "python main.py dev" (argv length 2) silently fell back to the
    # default and the argument was only honoured when a *second*,
    # unused argument was supplied.  argv[1] exists whenever
    # len(sys.argv) > 1.
    env = sys.argv[1] if len(sys.argv) > 1 else 'dev'
    # Pick the configuration class matching the requested environment.
    if env == 'dev':
        app.config = config.DevelopmentConfig
    elif env == 'test':
        app.config = config.TestConfig
    elif env == 'prod':
        app.config = config.ProductionConfig
    else:
        raise ValueError('Invalid environment name')
    # CI settings are attached separately — they are service metadata,
    # not per-environment app settings.
    app.ci = config.CIConfig
What are app.config and app.ci? How are they being used?
Also, what are the other best, most Pythonic ways to manage config files?
If I have multiple set of profiles/credentials (username-password), how do i manage them ?
Any possible encryption to files containing credentials ?
Will be of great learning to me.
Here is a small example of how you could use config files
class Config:
    """Base configuration holding defaults for every environment."""
    APP_NAME = 'myapp'
    ADMIN = 'admin'


class DevelopmentConfig(Config):
    """Development settings: debugging on, dedicated admin account."""
    DEBUG = True
    ADMIN = 'dev_admin'


class ProductionConfig(Config):
    """Production settings: debugging off, admin inherited from Config."""
    DEBUG = False


def main():
    """Select a configuration class and read settings from it."""
    config = ProductionConfig  # Change to DevelopmentConfig to experiment
    # You may now use your config where you want
    print(config.DEBUG)
    print(config.ADMIN)


if __name__ == "__main__":
    main()
This example does not use command line arguments like your example but should give you a good idea of building config files and using them.
In your example app.ci refers to configuration for continuous integration(CI) environment.
I want to have a service 'save_readings' that automatically saves data from a rostopic into a file. But each time the service gets called, it doesn't save any file.
I've tried to run those saving-file code in python without using a rosservice and the code works fine.
I don't understand why this is happening.
#!/usr/bin/env python
# license removed for brevity
import rospy,numpy
from std_msgs.msg import String,Int32MultiArray,Float32MultiArray,Bool
from std_srvs.srv import Empty,EmptyResponse
import geometry_msgs.msg
from geometry_msgs.msg import WrenchStamped
import json
# import settings
# Shared buffers filled by the subscriber callbacks below; start_read
# resets them and save_readings dumps them to JSON.
pos_record = []
wrench_record = []
def ftmsg2listandflip(ftmsg):
    """Flatten a WrenchStamped message into [fx, fy, fz, tx, ty, tz]."""
    force = ftmsg.wrench.force
    torque = ftmsg.wrench.torque
    return [force.x, force.y, force.z, torque.x, torque.y, torque.z]
def callback_pos(data):
    """Subscriber callback for "stage_pos": buffer each message payload."""
    global pos_record
    pos_record.append(data.data)
def callback_wrench(data):
    """Subscriber callback for "netft_data": buffer one row per message,
    [stamp_seconds, fx, fy, fz, tx, ty, tz]."""
    global wrench_record
    ft = ftmsg2listandflip(data)
    wrench_record.append([data.header.stamp.to_sec()] + ft)
def exp_listener():
    """Subscribe to the position and force/torque topics, then block in
    rospy.spin() until the node is shut down."""
    stop_sign = False  # NOTE(review): never read — candidate for removal
    rospy.Subscriber("stage_pos", Float32MultiArray, callback_pos)
    rospy.Subscriber("netft_data", WrenchStamped, callback_wrench)
    rospy.spin()
def start_read(req):
    """'start_read' service handler: clear both buffers so a new recording
    session starts empty.  `req` is the (empty) service request."""
    global pos_record
    global wrench_record
    pos_record = []
    wrench_record = []
    return EmptyResponse()
def save_readings(req):
    """'save_readings' service handler: dump the buffered position and
    wrench samples as JSON to the file named by the 'save_file_name'
    ROS parameter.

    NOTE(review): a bare filename here is resolved against the *node's*
    working directory (typically ~/.ros), not the launch directory —
    which is why the file appeared not to be saved.  Pass an absolute
    path in the parameter, or prepend one before opening.
    """
    global pos_record
    global wrench_record
    filename = rospy.get_param('save_file_name')
    output_data = {'pos_list':pos_record, 'wrench_list': wrench_record }
    rospy.loginfo("output_data %s",output_data)
    with open(filename, 'w') as outfile: # write data to 'data.json'
        print('dumping json file')
        json.dump(output_data, outfile)
        # BUG FIX: removed the redundant outfile.close() — the with-block
        # already closes the file on exit.
    print("file saved")
    rospy.sleep(2)
    return EmptyResponse()
if __name__ == '__main__':
    try:
        rospy.init_node('lisener_node', log_level = rospy.INFO)
        s_1 = rospy.Service('start_read', Empty, start_read)
        # BUG FIX: the second service handle also overwrote s_1; keep a
        # distinct name so both handles stay referenced.
        s_2 = rospy.Service('save_readings', Empty, save_readings)
        # BUG FIX: print *before* exp_listener() — rospy.spin() inside it
        # blocks until shutdown, so the original "ready" message was never
        # shown while the node was actually running.
        print ('mylistener ready!')
        exp_listener()
    except rospy.ROSInterruptException:
        pass
Got it. I need to specify a path for the file to be saved.
save_path = '/home/user/catkin_ws/src/motionstage/'
filename = save_path + filename
I was trying to write a program that would read all the PDFs in a folder and convert all of them to HTML — for example file1.pdf, file2.pdf, file3.pdf, then run the program and create something like file1.html, file2.html, file3.html, without losing the original PDFs, of course. Until now I could only do it for one file; I don't know how to do it for every file in the folder with a loop.
Here's my code:
import shlex
import subprocess
import os
import platform
def run(command):
    """Execute *command*, returning (success, stdout, stderr).

    On POSIX the command string is tokenized with shlex; on Windows the
    raw string is handed to Popen unchanged.
    """
    args = command if platform.system() == 'Windows' else shlex.split(command)
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, errors = proc.communicate()
    return proc.returncode == 0, output, errors
# Change this to your PDF file base directory
base_directory = 'C:\\PROJECT\\pdfs'
if not os.path.isdir(base_directory):
print "%s is not a directory" % base_directory
exit(1)
# Change this to your pdf2htmlEX executable location
#bin_path = 'C:\\Python27\\pdf2htmlEX\\pdf2htmlEX.exe'
#if not os.path.isfile(bin_path):
# print "Could not find %s" % bin_path
# exit(1)
for dir_path, dir_name_list, file_name_list in os.walk(base_directory):
for file_name in file_name_list:
# If this is not a PDF file
if not file_name.endswith('.pdf'):
# Skip it
continue
file_path = os.path.join(dir_path, file_name)
# Convert your PDF to HTML here
args = (file_name, file_path)
success, output, errors = run("pdf2txt.py -o %s.html %s" %args)
if not success:
print "Could not convert %s to HTML" % file_path
print "%s" % errors
This is a complete solution that uses os.walk and pdf2htmlEX:
import shlex
import subprocess
import os
import platform
def run(command):
    """Run *command* and capture its output.

    Returns a (succeeded, stdout_bytes, stderr_bytes) triple.  The
    command string is split into an argv list on non-Windows platforms
    only; Windows receives it verbatim.
    """
    if platform.system() != 'Windows':
        argv = shlex.split(command)
    else:
        argv = command
    child = subprocess.Popen(
        argv,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = child.communicate()
    succeeded = child.returncode == 0
    return succeeded, out, err
# Change this to your PDF file base directory
base_directory = 'C:\\Users\\Admin\\Desktop\\learningpython\\PROJECT'

if not os.path.isdir(base_directory):
    print "%s is not a directory" % base_directory
    exit(1)

# Change this to your pdf2htmlEX executable location
bin_path = 'C:\\Python27\\pdf2htmlEX-master\\pdf2htmlEX.exe'
if not os.path.isfile(bin_path):
    print "Could not find %s" % bin_path
    exit(1)

# Recursively visit every file under base_directory; non-PDFs are skipped.
for dir_path, dir_name_list, file_name_list in os.walk(base_directory):
    for file_name in file_name_list:
        # If this is not a PDF file
        if not file_name.endswith('.pdf'):
            # Skip it
            continue
        file_path = os.path.join(dir_path, file_name)
        # Convert your PDF to HTML here.  --dest-dir is the PDF's own
        # directory, so each .html lands beside its source file.
        args = (bin_path, dir_path, file_path)
        success, output, errors = run("%s --dest-dir %s %s" % args)
        if not success:
            print "Could not convert %s to HTML" % file_path
            print "%s" % errors
to compile pdf2html project by https://github.com/coolwanglu/pdf2htmlEX, and system call cmd pdf2html by python
I was wondering how to open or read a binary file that has been saved in octave, with the extension .SAVE? I have tried opening it with MATLAB, using the 'load' function in octave, but nothing seems to be working. I'm trying to understand someone else's code and they have saved the output of a simulation in this file.
The Octave binary format is briefly described in the comments before the function read_binary_data() in load-save.cc.
Are you sure the file is in "Octave binary format"? The file ending ".SAVE" can be chosen arbitrarily, so this could also be CSV, gzipped, etc.
You can run "file yourfile.SAVE" and paste the output, or check whether the first bytes of your file are "Octave-1-L" or "Octave-1-B".
If you want to use these files from a program other than GNU Octave, I would suggest loading them in Octave and saving them in another format. See "help save" for a list of supported formats.
EDIT:
Because the initial poster asked: Of course you can use GNU Octave from the terminal (there is no need for the GUI, and I don't know which software part you are referring to when you use the phrase "octave GUI"; see the Octave FAQ). Just install it for your platform (install instructions are on wiki.octave.org) and run it.
Code for reading octave binary save files in python 2/3.
Tested on:
strings
single and double precision real and complex floats
various integer types
scalar, matrix and array
Unsupported:
struct
cell array
...
Python code:
# This code is public domain
from __future__ import print_function
import sys
from collections import OrderedDict
import numpy as np
# Python 2/3 compatibility shims: on py3, file reads yield bytes that must
# be decoded to get str; on py2, reads already yield str and unicode()
# performs the decoding.
if sys.version_info[0] > 2:
    def tostr(s):
        # bytes from the file -> native str (py3: utf8 decode).
        return s.decode('utf8')

    def decode(s, encoding='utf8'):
        return s.decode(encoding)

    STR_ENCODING = 'utf8'
else:
    def tostr(s):
        # py2: file reads already yield str; pass through unchanged.
        return s

    def decode(s, encoding='utf8'):
        return unicode(s, encoding)

    # None means "leave string data as bytes", yielding py2 native str.
    STR_ENCODING = None
# Octave data-type tag (one byte in each variable record) -> type name.
# A tag of 255 instead means the type name is spelled out as a string
# (handled in loadoct).
DATA_TYPES = {
    1: "scalar",
    2: "matrix",
    3: "complex scalar",
    4: "complex matrix",
    5: "old_string",
    6: "range",
    7: "string",
}

# Element-type byte (read for "scalar"/"matrix" payloads) -> numpy dtype
# code: unsigned/signed ints of various widths plus f4/f8 floats.
TYPE_CODES = {
    0: "u1",
    1: "u2",
    2: "u4",
    3: "i1",
    4: "i2",
    5: "i4",
    6: "f4",
    7: "f8",
    8: "u8",
    9: "i8",
}

# Pre-built numpy dtypes keyed by the same element-type byte.
DTYPES = {k: np.dtype(v) for k, v in TYPE_CODES.items()}
def loadoct(fd, encoding=STR_ENCODING):
    """
    Read an octave binary file from the file handle fd, returning
    an array of structures. If encoding is not None then convert
    strings from bytes to unicode. Default is STR_ENCODING, which
    is utf8 for python 3 and None for python 2, yielding arrays
    of type str in each dialect.
    """
    # Header: 10-byte magic whose final letter encodes byte order
    # (L = little-endian, B = big-endian).
    magic = fd.read(10)
    assert(magic == b"Octave-1-L" or magic == b"Octave-1-B")
    endian = "<" if magic[-1:] == b"L" else ">"
    # Float type is 0: IEEE-LE, 1: IEEE-BE, 2: VAX-D, 3: VAX-G, 4: Cray
    # Not used since Octave assumes IEEE format floats.
    _float_format = fd.read(1)
    len_dtype = np.dtype(endian + "i4")

    def read_len():
        # Read one 4-byte length field; None signals end-of-file.
        len_bytes = fd.read(4)
        if not len_bytes:
            return None
        return np.frombuffer(len_bytes, len_dtype)[0]

    table = OrderedDict()
    while True:
        # Each variable record: name, doc string, global flag, type tag,
        # then a type-specific payload.  doc/is_global are read even
        # though unused — the stream position must advance past them.
        name_length = read_len()
        if name_length is None: # EOF
            break
        name = tostr(fd.read(name_length))
        doc_length = read_len()
        doc = tostr(fd.read(doc_length)) if doc_length else ''
        is_global = bool(ord(fd.read(1)))
        data_type = ord(fd.read(1))
        if data_type == 255:
            # Tag 255: type name stored explicitly as a length-prefixed string.
            type_str = tostr(fd.read(read_len()))
        else:
            type_str = DATA_TYPES[data_type]
        #print("reading", name, type_str)
        if type_str.endswith("scalar"):
            if type_str == "scalar":
                dtype = DTYPES[ord(fd.read(1))]
            elif type_str == "complex scalar":
                _ = fd.read(1)
                dtype = np.dtype('complex128')
            elif type_str == "float complex scalar":
                _ = fd.read(1)
                dtype = np.dtype('complex64')
            else:
                # e.g. "int32 scalar" -> numpy dtype "int32"
                # (strip the trailing " scalar").
                dtype = np.dtype(type_str[:-7])
            dtype = dtype.newbyteorder(endian)
            data = np.frombuffer(fd.read(dtype.itemsize), dtype)
            table[name] = data[0]
        elif type_str.endswith("matrix"):
            # A negative ndims marks the n-d layout (dims follow);
            # otherwise ndims is the row count and the column count follows.
            ndims = read_len()
            if ndims < 0:
                ndims = -ndims
                dims = np.frombuffer(fd.read(4*ndims), len_dtype)
            else:
                dims = (ndims, read_len())
            count = np.prod(dims)
            if type_str == "matrix":
                dtype = DTYPES[ord(fd.read(1))]
            elif type_str == "complex matrix":
                _ = fd.read(1)
                dtype = np.dtype('complex128')
            elif type_str == "float complex matrix":
                _ = fd.read(1)
                dtype = np.dtype('complex64')
            else:
                dtype = np.dtype(type_str[:-7])
            dtype = dtype.newbyteorder(endian)
            data = np.frombuffer(fd.read(count*dtype.itemsize), dtype)
            # Note: Use data.copy() to make a modifiable array.
            table[name] = data.reshape(dims, order='F')
        elif type_str == "old_string":
            data = fd.read(read_len())
            if encoding is not None:
                data = decode(data, encoding)
            table[name] = data
        elif type_str in ("string", "sq_string"):
            nrows = read_len()
            if nrows < 0:
                # n-d char array: stored Fortran-order, then viewed as
                # fixed-width byte strings along the last dimension.
                ndims = -nrows
                dims = np.frombuffer(fd.read(4*ndims), len_dtype)
                count = np.prod(dims)
                fortran_order = np.frombuffer(fd.read(count), dtype='uint8')
                c_order = np.ascontiguousarray(fortran_order.reshape(dims, order='F'))
                data = c_order.view(dtype='|S'+str(dims[-1]))
                if encoding is not None:
                    data = np.array([decode(s, encoding) for s in data.flat])
                table[name] = data.reshape(dims[:-1])
            else:
                # Simple case: nrows independent length-prefixed strings.
                data = [fd.read(read_len()) for _ in range(nrows)]
                if encoding is not None:
                    data = [decode(s, encoding) for s in data]
                table[name] = np.array(data)
        else:
            raise NotImplementedError("unknown octave type "+type_str)
        #print("read %s:%s"%(name, type_str), table[name])
    return table
def _dump(filename, encoding=STR_ENCODING):
    """Load *filename* (transparently gunzipping '.gz' files) and print
    each stored variable as "name value", one per line."""
    import gzip
    opener = gzip.open if filename.endswith('.gz') else open
    with opener(filename, 'rb') as fd:
        table = loadoct(fd, encoding)
    for name, value in table.items():
        print(name, value)
if __name__ == "__main__":
    # Dump the file named on the command line; the commented variants show
    # how to force unicode or raw-bytes string handling.
    #_dump(sys.argv[1], encoding='utf8') # unicode
    #_dump(sys.argv[1], encoding=None) # bytes
    _dump(sys.argv[1]) # str, encoding=STR_ENCODING