How can I mock a new final class - JUnit

My code is as follows.
import java.io.IOException;

import org.xbill.DNS.Lookup;
import org.xbill.DNS.Record;
import org.xbill.DNS.Type;

public class MailCheckService {

    public Record[] mailHostValidate(String email, MailEntity mailEntity) throws IOException {
        Record[] records = null;
        String hostName = email.split("@")[1];
        try {
            Lookup lookup = new Lookup(hostName, Type.MX);
            lookup.run();
            records = lookup.getAnswers();
        } catch (IOException e) {
            throw e;
        }
        return records;
    }
}
How can I mock the Lookup class, or should I rewrite the code?
If possible, please also provide the version of the jar.
Thanks and best regards.
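One way to approach this (a sketch, not from the original thread): Mockito 2.x with the mockito-inline artifact (org.mockito:mockito-inline) can mock final classes directly. To avoid having to intercept the new Lookup(...) call itself, the service can receive a small factory; the LookupFactory interface below is a hypothetical helper introduced for testability, not part of dnsjava.
// A minimal sketch, assuming Mockito 2.x+ with the mockito-inline artifact,
// which enables mocking of final classes. LookupFactory is a hypothetical
// seam: MailCheckService would call factory.create(...) instead of new Lookup(...).
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import org.junit.Test;
import org.xbill.DNS.Lookup;
import org.xbill.DNS.Record;
import org.xbill.DNS.Type;

public class MailCheckServiceTest {

    // hypothetical factory that MailCheckService would receive in its constructor
    interface LookupFactory {
        Lookup create(String hostName, int type) throws java.io.IOException;
    }

    @Test
    public void mailHostValidateQueriesMxRecords() throws Exception {
        Lookup lookup = mock(Lookup.class); // works on a final class with mockito-inline
        when(lookup.getAnswers()).thenReturn(new Record[0]);

        LookupFactory factory = mock(LookupFactory.class);
        when(factory.create("example.com", Type.MX)).thenReturn(lookup);

        // the service, refactored to use the factory, would then do:
        Lookup l = factory.create("example.com", Type.MX);
        l.run();

        verify(lookup).run();
    }
}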

Related

Spring Netflix Zuul: API-Gateway - Transforming a JSON request

I am currently building an API gateway for a new microservices system, using the Spring Netflix Zuul library.
So far my gateway contains PRE and POST filters that intercept the requests and perform the required logic, etc.
One thing that I see is that REST calls to specific microservices require invoking an API endpoint (either GET or POST) containing JSON payload data that is very complex.
For an end user, sending a request containing this JSON directly to a microservice would not be user friendly.
I had an idea that the API gateway could act as a mediator: the user submits a simpler, more user-friendly JSON to the API gateway, which transforms the payload into the "complex" JSON structure that the target microservice understands, so that it can handle the request efficiently.
My understanding of Netflix Zuul is that this can be done by creating a route filter and including this logic there.
Can anyone explain if (or how) this transformation could be done using Netflix Zuul?
Any advice is appreciated.
Thanks.
No doubt you can do it with Zuul; I am currently trying to do almost the same. I'd suggest you take a look at this repo:
sample-zuul-filters
and the official doc on GitHub.
Filters have to extend ZuulFilter and implement the following methods:
/**
 * Return a string defining when your filter must execute during Zuul's
 * lifecycle ('pre'/'post' routing).
 **/
@Override
public String filterType() {
    return "pre"; // run this filter before sending the final request
}

/**
 * Return an int describing the order that the filter should run in,
 * relative to the other filters in the current 'pre' or 'post' context.
 **/
@Override
public int filterOrder() {
    return 1; // this filter runs first in a pre-request context
}

/**
 * Return a boolean indicating whether the filter should run or not.
 **/
@Override
public boolean shouldFilter() {
    RequestContext ctx = RequestContext.getCurrentContext();
    return ctx.getRequest().getRequestURI().equals("/theRouteIWantToFilter");
}

/**
 * After all the config stuff you can define what your filter actually does
 * here. This is where your JSON logic goes.
 */
@Override
public Object run() {
    try {
        RequestContext ctx = RequestContext.getCurrentContext();
        HttpServletRequest request = ctx.getRequest();
        InputStream stream = ctx.getResponseDataStream();
        String body = StreamUtils.copyToString(stream, Charset.forName("UTF-8"));
        // transform your JSON and send it to the API
        ctx.setResponseBody("Modified body: " + body);
    } catch (IOException e) {
        e.printStackTrace();
    }
    return null;
}
I am not sure my answer is 100% accurate since I am still working on it, but it's a start.
I've done payload conversion in a pre filter, but this should work in a route filter as well. Use com.netflix.zuul.http.HttpServletRequestWrapper to capture and modify the original request payload before forwarding the request to the target microservice.
Sample code:
package com.sample.zuul.filters.pre;
import com.google.common.io.CharStreams;
import com.netflix.zuul.ZuulFilter;
import com.netflix.zuul.context.RequestContext;
import com.netflix.zuul.http.HttpServletRequestWrapper;
import com.netflix.zuul.http.ServletInputStreamWrapper;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import javax.servlet.ServletInputStream;
import javax.servlet.http.HttpServletRequest;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
public class JsonConverterFilter extends ZuulFilter {

    @Override
    public String filterType() {
        return "pre";
    }

    @Override
    public int filterOrder() {
        return 0; // set it to whatever the order of your filter is
    }

    @Override
    public boolean shouldFilter() {
        return true;
    }

    @Override
    public Object run() {
        RequestContext context = RequestContext.getCurrentContext();
        HttpServletRequest request = new HttpServletRequestWrapper(context.getRequest());
        String requestData = null;
        JSONParser jsonParser = new JSONParser();
        JSONObject requestJson = null;
        try {
            if (request.getContentLength() > 0) {
                requestData = CharStreams.toString(request.getReader());
            }
            if (requestData == null) {
                return null;
            }
            requestJson = (JSONObject) jsonParser.parse(requestData);
        } catch (Exception e) {
            // add your exception handling code here
        }
        JSONObject modifiedRequest = modifyJSONRequest(requestJson);
        final byte[] newRequestDataBytes = modifiedRequest.toJSONString().getBytes();
        request = getUpdatedHttpServletRequest(request, newRequestDataBytes);
        context.setRequest(request);
        return null;
    }

    private JSONObject modifyJSONRequest(JSONObject requestJSON) {
        JSONObject jsonObjectDecryptedPayload = null;
        try {
            jsonObjectDecryptedPayload = (JSONObject) new JSONParser()
                    .parse("Your new complex json");
        } catch (ParseException e) {
            e.printStackTrace();
        }
        return jsonObjectDecryptedPayload;
    }

    private HttpServletRequest getUpdatedHttpServletRequest(HttpServletRequest request, final byte[] newRequestDataBytes) {
        request = new javax.servlet.http.HttpServletRequestWrapper(request) {
            @Override
            public BufferedReader getReader() throws IOException {
                return new BufferedReader(
                        new InputStreamReader(new ByteArrayInputStream(newRequestDataBytes)));
            }

            @Override
            public ServletInputStream getInputStream() throws IOException {
                return new ServletInputStreamWrapper(newRequestDataBytes);
            }

            /*
             * Forcing any calls to HttpServletRequest.getContentLength to return the accurate length of bytes
             * from a modified request
             */
            @Override
            public int getContentLength() {
                return newRequestDataBytes.length;
            }
        };
        return request;
    }
}
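One detail neither answer spells out (and an assumption about the usual Spring Cloud Netflix setup): a ZuulFilter only runs if it is exposed as a Spring bean, since Zuul discovers filters from the application context. A minimal sketch, with an illustrative configuration class name:
// A minimal sketch, assuming Spring Cloud Netflix with @EnableZuulProxy.
// JsonConverterFilter is the filter from the answer above; the configuration
// class name is illustrative.
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class FilterConfiguration {

    // exposing the filter as a bean is enough for Zuul to pick it up
    @Bean
    public JsonConverterFilter jsonConverterFilter() {
        return new JsonConverterFilter();
    }
}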

List down multiple files in datatable to download [duplicate]

This question already has answers here:
Show JDBC ResultSet in HTML in JSP page using MVC and DAO pattern
(6 answers)
Closed 6 years ago.
I have a MySQL table which contains the list of files I need to populate into a table on my JSP page. I've attached a screenshot of the table contents. I was referring to the example given here, but that example is just for one file. It would be really helpful if experts could point me to an example to follow...
Thank you very much in advance.
First step:
Database to controller:
Grab data from your database by creating a DAO (data access object).
For example:
public class Document {

    String document_name;
    String document_date;
    String document_RevisedName;

    public String getDocument_name() {
        return document_name;
    }
    public void setDocument_name(String document_name) {
        this.document_name = document_name;
    }
    public String getDocument_date() {
        return document_date;
    }
    public void setDocument_date(String document_date) {
        this.document_date = document_date;
    }
    public String getDocument_RevisedName() {
        return document_RevisedName;
    }
    public void setDocument_RevisedName(String document_RevisedName) {
        this.document_RevisedName = document_RevisedName;
    }
}
Create an interface Document_DAO:
public interface Document_DAO {
    ArrayList<Document> getAllDocuments();
}
Implement the above interface in an implementation class Document_DAO_Imp:
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;

import Utils.DataSource;

public class Document_DAO_Imp implements Document_DAO {

    Connection conn = null;
    DataSource dataSource = new DataSource();
    PreparedStatement preparedStatement = null;
    ResultSet resultSet = null;
    String query = "";
    ArrayList<Document> documentList = new ArrayList<>();

    @Override
    public ArrayList<Document> getAllDocuments() {
        conn = dataSource.createConnection();
        try {
            query = "SELECT * FROM yourTableName";
            preparedStatement = conn.prepareStatement(query);
            resultSet = preparedStatement.executeQuery();
            while (resultSet.next()) {
                Document doc = new Document();
                doc.setDocument_name(resultSet.getString("yourColumnName"));
                doc.setDocument_date(resultSet.getString("yourColumnName"));
                doc.setDocument_RevisedName(resultSet.getString("yourColumnName"));
                documentList.add(doc);
            }
        }
        catch (SQLException sqlE) {
            System.out.println(sqlE.toString());
        }
        finally {
            try {
                resultSet.close();
                preparedStatement.close();
                conn.close();
            } catch (SQLException e) {
                System.out.println(e.toString());
            }
        }
        return documentList;
    }
}
Finally:
Call the above DAO from your controller/servlet.
ArrayList<Document> docList = new ArrayList<>();

protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
    Document_DAO dao = new Document_DAO_Imp();
    docList = dao.getAllDocuments();
    JsonObject jsonObject = new JsonObject();
    JsonElement jsonElement = null;
    if (docList.size() != 0) {
        jsonElement = new Gson().toJsonTree(docList);
        jsonObject.add("docList", jsonElement);
        jsonObject.addProperty("success", "true");
    }
    else {
        jsonObject.addProperty("success", "false");
    }
    response.getWriter().println(jsonObject);
}
So far you have successfully grabbed data from the DB and sent it to your JSP/HTML page.
Make sure to add GSON (a Google library) to your build path and the WEB-INF/lib folder.
Now, using jQuery, we can parse the result into the page and later download it in XLS format.
To fetch data using jQuery Ajax you can either bind it to an event or load the data on page load:
$(document).ready(function() {
    $.ajax({
        type: "get",
        url: "DocumentServlet",
        dataType: 'json',
        // if we received a response from the server
        success: function(data, textStatus, jqXHR) {
            console.log("Getting Document Information!");
            if (data.success == 'true') {
                data = data.docList;
                console.log(data);
                $('.table').html("");
                $.each(data, function(index, value) {
                    var eachrow = "<tr><td>" + value.document_name
                        + "</td><td>" + value.document_RevisedName
                        + "</td></tr>";
                    $('.table').append(eachrow);
                });
            } else {
                console.log("failure");
            }
        },
        error: function(jqXHR, textStatus, errorThrown) {
        }
    });
});
Once you have successfully populated the JSON array into the table, you can use any external library to convert the data to XLS and download it.
First use this structure to create the page; then I'll provide the code to download.
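In the meantime, here is one possible shape for the download step (a sketch, not from the original answer): a server-side export with Apache POI, reusing the Document and Document_DAO_Imp classes from above. The servlet name, sheet layout, and POI version (3.10+, so Workbook is Closeable) are assumptions.
// A minimal server-side sketch, assuming Apache POI 3.10+ on the build path.
// The servlet class name and sheet layout are illustrative.
import java.io.IOException;
import java.util.ArrayList;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Sheet;
import org.apache.poi.ss.usermodel.Workbook;

public class DocumentExportServlet extends HttpServlet {

    protected void doGet(HttpServletRequest request, HttpServletResponse response)
            throws ServletException, IOException {
        ArrayList<Document> docList = new Document_DAO_Imp().getAllDocuments();

        try (Workbook workbook = new HSSFWorkbook()) {
            Sheet sheet = workbook.createSheet("Documents");
            int rowNum = 0;
            for (Document doc : docList) {
                Row row = sheet.createRow(rowNum++);
                row.createCell(0).setCellValue(doc.getDocument_name());
                row.createCell(1).setCellValue(doc.getDocument_RevisedName());
            }
            // send the workbook back as a downloadable .xls attachment
            response.setContentType("application/vnd.ms-excel");
            response.setHeader("Content-Disposition", "attachment; filename=documents.xls");
            workbook.write(response.getOutputStream());
        }
    }
}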

How do I load an XML in OptaPlanner

I have created a MySQL database with entries similar to a nurse roster. Now I need to send this data to OptaPlanner deployed on my server. To which file do I need to send it in the OptaPlanner folder deployed on the server to get the results displayed on my webpage?
I'm using XStream to generate the XML file.
Can anyone please give me a brief on how to make this functionality work and get the desired results?
The whole dataset serialization from and to XML is part of optaplanner-examples: OptaPlanner itself doesn't provide or require any serialization format. That being said, optaplanner-examples includes the following serialization formats:
Every example: XStream XML format in data directories unsolved and solved. The format is defined by the XStream annotations (@XStreamAlias etc.) on the domain classes. In some cases the XML format is too verbose, causing OutOfMemoryError, for example for the big MachineReassignment B datasets.
Most examples: Competition specific TXT format in data directories import and export. The format is defined by the competition (see docs). In the examples GUI, click on button Import to load them.
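For illustration (a sketch: the class name follows the nurse roster example, but the exact fields are assumptions), the element names in those XML files come straight from the XStream annotations on the domain classes:
// A minimal sketch of how the examples' XML format is driven by XStream
// annotations; the field shown is illustrative.
import com.thoughtworks.xstream.annotations.XStreamAlias;

@XStreamAlias("NurseRoster")  // serialized as <NurseRoster>...</NurseRoster>
public class NurseRoster {

    @XStreamAlias("code")
    private String code;      // serialized as <code>...</code>
}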
I suggest you read the final chapter in the OptaPlanner manual/documentation:
Chapter 15. Integration
If your data source is a database, you can annotate your domain POJOs with JPA annotations. I think it would be a waste to store the data from the database into an XML file and then feed that XML file to OptaPlanner; it is wiser to feed your POJO objects to OptaPlanner directly.
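To illustrate that point (a sketch, assuming OptaPlanner 6.x+ and a shift-assignment style domain; the class and field names are not from the question), the same JPA-mapped POJO can double as an OptaPlanner planning entity, so no XML round trip is needed:
// A minimal sketch, assuming OptaPlanner 6.x+ and JPA; the domain, class
// names, and field names are illustrative, not from the question's schema.
import javax.persistence.Entity;
import javax.persistence.Id;

import org.optaplanner.core.api.domain.entity.PlanningEntity;
import org.optaplanner.core.api.domain.variable.PlanningVariable;

@Entity          // JPA: loaded straight from MySQL, no XML round trip
@PlanningEntity  // OptaPlanner: instances of this class are planned by the solver
public class ShiftAssignment {

    @Id
    private Long id;

    // The solver assigns this value; candidates come from a value range
    // provider (id "employeeRange") declared on the solution class.
    @PlanningVariable(valueRangeProviderRefs = {"employeeRange"})
    private Employee employee; // Employee is another JPA-mapped POJO (not shown)

    public Employee getEmployee() {
        return employee;
    }

    public void setEmployee(Employee employee) {
        this.employee = employee;
    }
}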
I don't know what your web application technology is, but the general algorithm will be like this:
Get POJO object data from your database (you can use JPA etc.)
Construct the solution class object
Feed the solution object to the OptaPlanner solver
Get the best solution from the solver and present it to your user as desired.
Take a look at the CloudBalancingHelloWorld.java class to get the basic idea. Hope this can help you.
package com.jdbcxml;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.w3c.dom.Document;
class EmployeeDAO
{
    private Connection conn = null;

    static
    {
        try
        {
            Class.forName("com.mysql.jdbc.Driver");
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
    }

    public EmployeeDAO()
    {
        String url = "jdbc:mysql://50.62.23.184:3306/gtuser";
        String userId = "gtuser1";
        String passWord = "";
        try
        {
            conn = DriverManager.getConnection(url, userId, passWord);
        }
        catch (SQLException e)
        {
            e.printStackTrace();
        }
    }

    public void finalize()
    {
        try
        {
            conn.close();
        }
        catch (SQLException e)
        {
            e.printStackTrace();
        }
    }

    public Document getCustomerList()
    {
        Document doc = null;
        try
        {
            Statement stmt = conn.createStatement();
            ResultSet rs = stmt.executeQuery("SELECT * from t7_users");
            doc = JDBCUtil.toDocument(rs);
            rs.close();
            stmt.close();
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
        return doc;
    }

    public String getCustomerListAsString()
    {
        String xml = null;
        try
        {
            Statement stmt = conn.createStatement();
            ResultSet rs = stmt.executeQuery("SELECT * from t7_users");
            xml = JDBCUtil.toXML(rs);
            rs.close();
            stmt.close();
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
        return xml;
    }

    public static void main(String argv[]) throws Exception
    {
        EmployeeDAO dao = new EmployeeDAO();
        String xml = dao.getCustomerListAsString();
        System.out.println(xml);
        Document doc = dao.getCustomerList();
        System.out.println(doc);
        //PrintWriter out = new PrintWriter(new FileWriter("output.txt"));
        //out.write(doc);
        //out.close();
    }
}
Here is the pseudocode (I never actually use JSP; I currently use GWT) to give you the basic idea, but please remember these notes:
I think it would be a waste to save your POJO objects to XML and then use the XStream library to extract them back into POJO objects. The OptaPlanner examples use XML because they only need static data, and for the sake of example.
I assume that you have already created the appropriate domain class model that fits your planning problem domain, because this is one of the core concepts of OptaPlanner.
In the method generateCustomerRoster, you should put your own logic to convert your customer POJO objects into the planning solution object.
Hope this can help you and lead you to finish your job. Thanks & regards.
package com.jdbcxml;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;

import org.optaplanner.core.api.solver.Solver;
import org.optaplanner.core.api.solver.SolverFactory;

public class EmployeeDAO
{
    private Connection conn = null;

    static
    {
        try
        {
            Class.forName("com.mysql.jdbc.Driver");
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
    }

    public EmployeeDAO()
    {
        String url = "jdbc:mysql://50.62.23.184:3306/gtuser";
        String userId = "gtuser1";
        String passWord = "";
        try
        {
            conn = DriverManager.getConnection(url, userId, passWord);
        }
        catch (SQLException e)
        {
            e.printStackTrace();
        }
    }

    public void finalize()
    {
        try
        {
            conn.close();
        }
        catch (SQLException e)
        {
            e.printStackTrace();
        }
    }

    public List<Customer> getCustomerList()
    {
        List<Customer> customers = new ArrayList<Customer>();
        try
        {
            Statement stmt = conn.createStatement();
            ResultSet rs = stmt.executeQuery("SELECT * from t7_users");
            while (rs.next())
            {
                Customer customer = new Customer();
                // map each column of the result set onto your Customer POJO here
                customers.add(customer);
            }
            rs.close();
            stmt.close();
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
        return customers;
    }

    public CustomerRoster generateCustomerRoster(List<Customer> rawData)
    {
        CustomerRoster result = new CustomerRoster();
        // here you should write your logic to generate customer roster data from your raw data (Customer)
        return result;
    }

    public static void main(String argv[]) throws Exception
    {
        // Build the solver
        SolverFactory solverFactory = SolverFactory.createFromXmlResource("yourSolverConfig.xml");
        Solver solver = solverFactory.buildSolver();

        // Load your problem
        EmployeeDAO dao = new EmployeeDAO();
        List<Customer> listCustomer = dao.getCustomerList();
        CustomerRoster unsolvedCustomerRoster = dao.generateCustomerRoster(listCustomer);

        // Solve the problem
        solver.solve(unsolvedCustomerRoster);
        CustomerRoster solvedCustomerRoster = (CustomerRoster) solver.getBestSolution();

        // Display the result
        DataGrid grid = new DataGrid(solvedCustomerRoster); // just change this line of code to display in any of your view components
    }
}

How to save Apache Spark schema output in a MySQL database

Can anyone please tell me if there is any way in Apache Spark to store a JavaRDD in a MySQL database? I am taking input from 2 CSV files and then, after doing join operations on their contents, I need to save the output (the output JavaRDD) in a MySQL database. I am already able to save the output successfully on HDFS, but I cannot find any information related to an Apache Spark-MySQL connection. Below I am posting the code for Spark SQL. This might serve as a reference to those who are looking for a Spark SQL example.
package attempt1;
import java.io.Serializable;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.api.java.JavaSQLContext;
import org.apache.spark.sql.api.java.JavaSchemaRDD;
import org.apache.spark.sql.api.java.Row;
public class Spark_Mysql {

    @SuppressWarnings("serial")
    public static class CompleteSample implements Serializable {
        private String ASSETNUM;
        private String ASSETTAG;
        private String CALNUM;

        public String getASSETNUM() {
            return ASSETNUM;
        }
        public void setASSETNUM(String aSSETNUM) {
            ASSETNUM = aSSETNUM;
        }
        public String getASSETTAG() {
            return ASSETTAG;
        }
        public void setASSETTAG(String aSSETTAG) {
            ASSETTAG = aSSETTAG;
        }
        public String getCALNUM() {
            return CALNUM;
        }
        public void setCALNUM(String cALNUM) {
            CALNUM = cALNUM;
        }
    }

    @SuppressWarnings("serial")
    public static class ExtendedSample implements Serializable {
        private String ASSETNUM;
        private String CHANGEBY;
        private String CHANGEDATE;

        public String getASSETNUM() {
            return ASSETNUM;
        }
        public void setASSETNUM(String aSSETNUM) {
            ASSETNUM = aSSETNUM;
        }
        public String getCHANGEBY() {
            return CHANGEBY;
        }
        public void setCHANGEBY(String cHANGEBY) {
            CHANGEBY = cHANGEBY;
        }
        public String getCHANGEDATE() {
            return CHANGEDATE;
        }
        public void setCHANGEDATE(String cHANGEDATE) {
            CHANGEDATE = cHANGEDATE;
        }
    }

    @SuppressWarnings("serial")
    public static void main(String[] args) throws Exception {
        JavaSparkContext ctx = new JavaSparkContext("local[2]", "JavaSparkSQL");
        JavaSQLContext sqlCtx = new JavaSQLContext(ctx);

        JavaRDD<CompleteSample> cs = ctx.textFile("C:/Users/cyg_server/Documents/bigDataExample/AssetsImportCompleteSample.csv").map(
                new Function<String, CompleteSample>() {
                    public CompleteSample call(String line) throws Exception {
                        String[] parts = line.split(",");
                        CompleteSample cs = new CompleteSample();
                        cs.setASSETNUM(parts[0]);
                        cs.setASSETTAG(parts[1]);
                        cs.setCALNUM(parts[2]);
                        return cs;
                    }
                });

        JavaRDD<ExtendedSample> es = ctx.textFile("C:/Users/cyg_server/Documents/bigDataExample/AssetsImportExtendedSample.csv").map(
                new Function<String, ExtendedSample>() {
                    public ExtendedSample call(String line) throws Exception {
                        String[] parts = line.split(",");
                        ExtendedSample es = new ExtendedSample();
                        es.setASSETNUM(parts[0]);
                        es.setCHANGEBY(parts[1]);
                        es.setCHANGEDATE(parts[2]);
                        return es;
                    }
                });

        JavaSchemaRDD complete = sqlCtx.applySchema(cs, CompleteSample.class);
        complete.registerAsTable("cs");

        JavaSchemaRDD extended = sqlCtx.applySchema(es, ExtendedSample.class);
        extended.registerAsTable("es");

        JavaSchemaRDD fs = sqlCtx.sql("SELECT cs.ASSETTAG, cs.CALNUM, es.CHANGEBY, es.CHANGEDATE FROM cs INNER JOIN es ON cs.ASSETNUM=es.ASSETNUM;");

        JavaRDD<String> result = fs.map(new Function<Row, String>() {
            public String call(Row row) {
                return row.getString(0);
            }
        });

        result.saveAsTextFile("hdfs://path/to/hdfs/dir-name"); // instead of HDFS I need to save it to a MySQL database, but I cannot find any Spark-MySQL connection
    }
}
Here at the end I am saving the result successfully in HDFS, but now I want to save it into a MySQL database. Kindly help me out. Thanks.
There are two approaches you can use for writing your results back to the database. One is to use something like DBOutputFormat and configure that; the other is to use foreachPartition on the RDD you want to save and pass in a function which creates a connection to MySQL and writes the result back (a sketch of that approach follows the DBOutputFormat example below).
Here is an example using DBOutputFormat.
Create a class that represents your table row:
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.apache.hadoop.mapred.lib.db.DBWritable;

public class TableRow implements DBWritable
{
    public String column1;
    public String column2;

    @Override
    public void write(PreparedStatement statement) throws SQLException
    {
        statement.setString(1, column1);
        statement.setString(2, column2);
    }

    @Override
    public void readFields(ResultSet resultSet) throws SQLException
    {
        throw new RuntimeException("readFields not implemented");
    }
}
Then configure your job and write a mapToPair function. The value doesn't appear to be used. If anyone knows, please post a comment.
String tableName = "YourTableName";
String[] fields = new String[] { "column1", "column2" };
JobConf job = new JobConf();
DBConfiguration.configureDB(job, "com.mysql.jdbc.Driver", "jdbc:mysql://localhost/DatabaseNameHere", "username", "password");
DBOutputFormat.setOutput(job, tableName, fields);
// map your rdd into a table row
JavaPairRDD<TableRow, Object> rows = rdd.mapToPair(...);
rows.saveAsHadoopDataset(job);
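And here is a minimal sketch of the second approach via foreachPartition; the JDBC URL, table name, and column name are placeholders, not from the question:
// A minimal sketch of the foreachPartition approach: one JDBC connection per
// partition, each record written back with a PreparedStatement. The URL,
// table, and column names are placeholders.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.Iterator;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.VoidFunction;

public class MySqlWriter {

    public static void save(JavaRDD<String> result) {
        result.foreachPartition(new VoidFunction<Iterator<String>>() {
            public void call(Iterator<String> rows) throws Exception {
                // open one connection per partition, not per record
                Connection conn = DriverManager.getConnection(
                        "jdbc:mysql://localhost/DatabaseNameHere", "username", "password");
                PreparedStatement stmt =
                        conn.prepareStatement("INSERT INTO YourTableName (column1) VALUES (?)");
                while (rows.hasNext()) {
                    stmt.setString(1, rows.next());
                    stmt.executeUpdate();
                }
                stmt.close();
                conn.close();
            }
        });
    }
}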

Insert data into MySQL from Excel via Eclipse using Selenium and TestNG

I got stuck while trying to do the coding part. Following is what I tried. I need to know the @Test portion: I don't know how to write the code to fetch the data from Excel and insert it into MySQL.
import org.testng.annotations.BeforeTest;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import com.thoughtworks.selenium.SeleneseTestBase;
import java.io.File;
import java.sql.*;
import jxl.*;
public class testng extends SeleneseTestBase {

    @BeforeTest
    public static void connection() {
        try {
            Class.forName("com.mysql.jdbc.Driver");
        } catch (ClassNotFoundException e1) {
            // TODO Auto-generated catch block
            e1.printStackTrace();
        }
    }

    @BeforeTest
    public static void MysqlConnection() { // we need to add the DataProvider name [name="DP"] to pass the data from the excel sheet
        try {
            Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/test", "root", "admin");
            Statement stmt = conn.createStatement();
        } catch (SQLException e) {
            e.printStackTrace();
            return;
        }
        System.out.println("Testing Testfile1");
    }

    @DataProvider(name = "DP1")
    public Object[][] createData1() throws Exception {
        Object[][] retObjArr = getTableArray("D:\\sakthi\\selenium\\data3.xls", "DataPool", "mysqldata");
        return (retObjArr);
    }

    @Test(dataProvider = "DP1")
    public void testDataProviderExample(int id, int plist_id, String email) throws Exception {
        // This is the part where I'm in need of help
    }

    public String[][] getTableArray(String xlFilePath, String sheetName, String tableName) throws Exception {
        String[][] tabArray = null;
        Workbook workbook = Workbook.getWorkbook(new File(xlFilePath));
        Sheet sheet = workbook.getSheet(sheetName);
        int startRow, startCol, endRow, endCol, ci, cj;
        Cell tableStart = sheet.findCell(tableName);
        startRow = tableStart.getRow();
        startCol = tableStart.getColumn();
        Cell tableEnd = sheet.findCell(tableName, startCol + 1, startRow + 1, 100, 64000, false);
        endRow = tableEnd.getRow();
        endCol = tableEnd.getColumn();
        System.out.println("startRow=" + startRow + ", endRow=" + endRow + ", " + "startCol=" + startCol + ", endCol=" + endCol);
        tabArray = new String[endRow - startRow - 1][endCol - startCol - 1];
        ci = 0;
        for (int i = startRow + 1; i < endRow; i++, ci++) {
            cj = 0;
            for (int j = startCol + 1; j < endCol; j++, cj++) {
                tabArray[ci][cj] = sheet.getCell(j, i).getContents();
            }
        }
        return (tabArray);
    }
}
Selenium is used to control a web browser; you don't need it to open an Excel file and write to a DB. A sketch of the missing @Test body follows.
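For reference, one possible shape for that @Test method (a sketch to drop into the class above, which already imports java.sql.*; the target table and column names are assumptions). The parameters are declared as Strings here because getTableArray returns cell contents as Strings, and are parsed where the columns are numeric:
// A minimal sketch of the missing @Test body: plain JDBC, no Selenium needed.
// The table name and column names are placeholders, not from the question.
@Test(dataProvider = "DP1")
public void testDataProviderExample(String id, String plist_id, String email) throws Exception {
    Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/test", "root", "admin");
    try {
        PreparedStatement stmt =
                conn.prepareStatement("INSERT INTO your_table (id, plist_id, email) VALUES (?, ?, ?)");
        stmt.setInt(1, Integer.parseInt(id));       // excel cell contents arrive as Strings
        stmt.setInt(2, Integer.parseInt(plist_id));
        stmt.setString(3, email);
        stmt.executeUpdate();
        stmt.close();
    } finally {
        conn.close();
    }
}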