object scalaSlickTest extends App {
val parsedConfig = ConfigFactory.parseFile(new File("src/main/resources/Application.conf"))
val conf = ConfigFactory.load(parsedConfig)
val db = Database.forConfig("mydb")
val lines = new ArrayBuffer[ Any ]()
val employees = TableQuery[ Employees ]
}
I am getting an exception when I run this.
You'll need to pass your loaded config
val db = Database.forConfig("mydb", config)
Otherwise forConfig uses the default config file, which is application.conf (case matters).
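For reference, a minimal sketch of the suggested fix, reusing the names from the question; the H2 profile import is an assumption, substitute whichever Slick profile matches your database:
import java.io.File
import com.typesafe.config.ConfigFactory
import slick.jdbc.H2Profile.api._ // assumption: pick the profile for your DB

object scalaSlickTest extends App {
  val parsedConfig = ConfigFactory.parseFile(new File("src/main/resources/Application.conf"))
  val conf = ConfigFactory.load(parsedConfig)

  // Pass the explicitly loaded Config so Slick does not fall back to application.conf
  val db = Database.forConfig("mydb", conf)
}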
I am new to Android. I am trying to create 3 JSON files in the device's Documents directory on app installation, if the files do not already exist (which they won't).
The problem is that the sequence below creates the files successfully:
device restart
app install
But the following sequence is not able to create the files:
app uninstall
manual file deletion from device
app install
What is going wrong?
Below is the code I am using to create the files:
private fun createDirectory() {
    val Dir = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOCUMENTS)
    if (!Dir.exists()) {
        Dir.mkdirs()
    }
    // create all files
    val IncomeFile = File(Dir, "Income.json")
    if (!IncomeFile.exists()) {
        val data: String = "{\"Incomes\":[]}"
        val out: FileOutputStream = FileOutputStream(IncomeFile)
        out.write(data.toByteArray())
        out.close()
    }
    val InvFile = File(Dir, "Investments.json")
    if (!InvFile.exists()) {
        val data: String = "{\"Investments\":[]}"
        val out: FileOutputStream = FileOutputStream(InvFile)
        out.write(data.toByteArray())
        out.close()
    }
    val ExpensesFile = File(Dir, "Expenses.json")
    if (!ExpensesFile.exists()) {
        val data: String = "{\"Expenses\":[]}"
        val out: FileOutputStream = FileOutputStream(ExpensesFile)
        out.write(data.toByteArray())
        out.close()
    }
}
I have the permission request before this:
private fun askPermission() {
ActivityCompat.requestPermissions( this, arrayOf(android.Manifest.permission.WRITE_EXTERNAL_STORAGE,
android.Manifest.permission.READ_EXTERNAL_STORAGE),
PackageManager.PERMISSION_GRANTED)
}
I have the uses-permission entries in the manifest file:
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
I also tried createFile and file.write(text); neither was able to create new files.
I'm trying to put several event types in the same Kafka topic using the JSON format, but in the producer implementation I always get org.apache.kafka.common.errors.SerializationException: Error serializing JSON message. It seems the @Schema annotation isn't working as expected: the schema defined by the annotation doesn't appear to be enriched properly, and in the method that validates backward compatibility the schema defined by my event has an empty schemaObj, so the result is "not compatible" and serialization fails.
My event:
@Schema(
  value = "1",
  refs = Array(new SchemaReference(name = "event", subject = "event"))
)
case class Event(@BeanProperty id: String,
                 @BeanProperty name: String)
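As a hedged aside (an assumption, not taken from the question): the value element of Confluent's @Schema annotation is meant to hold a JSON Schema string rather than a bare version number, so a populated variant might look roughly like the following, with an illustrative inline schema for Event and the refs left exactly as in the question:
@Schema(
  value = """{
    "$id": "Event.schema.json",
    "type": "object",
    "properties": {
      "id":   { "type": "string" },
      "name": { "type": "string" }
    }
  }""",
  refs = Array(new SchemaReference(name = "event", subject = "event"))
)
case class Event(@BeanProperty id: String,
                 @BeanProperty name: String)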
Producer:
def send(): Unit = {
val props = new Properties() {
put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
put(
ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
"org.apache.kafka.common.serialization.StringSerializer"
)
put(
ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
"io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializer"
)
put("auto.register.schemas", "false")
put("use.latest.version", "true")
put("schema.registry.url", "http://127.0.0.1:8081")
put("json.fail.invalid.schema", "true")
}
val producer = new KafkaProducer[String, Event](props)
val topic = "all-json"
val key = "key1"
val event = Event("id", "name")
val record = new ProducerRecord[String, Event](topic, key, event)
producer.send(record).get
}
From the command line, I can produce the events perfectly. The top-level JSON Schema is modeled by
{
"oneOf": [
{ "$ref": "Event.schema.json" },
{ "$ref": "EventB.schema.json" }
]
}
...
The Confluent dependencies used are version 6.0.1.
Do you know what the issue is?
I would like to do a conditional bulk connection of bidirectional buses, conceptually like the following.
val io = IO(new Bundle {
val master = Decoupled(UInt(8.W))
val slave0 = Flipped(Decoupled(UInt(8.W)))
val slave1 = Flipped(Decoupled(UInt(8.W)))
val select = Input(Bool())
})
when (select) {
io.slave0 <> io.master
io.slave1 <> some_null_decoupled
}.otherwise {
io.slave1 <> io.master
io.slave0 <> some_null_decoupled
}
This is cleaner than having to individually describe the logic for the io.master.ready, io.slave0.bits, io.slave0.valid, etc. signals.
Is there a syntax similar to this which will work? When I try this in my code, I get a lot of firrtl.passes.CheckInitialization$RefNotInitializedException messages.
I suspect the issue is with the definition of some_null_decoupled. What you posted looks sane, other than the fact that the definition of some_null_decoupled is missing. The following works fine for me (using Chisel 3.1.6):
import chisel3._
import chisel3.util._
class ConditionalBulkConnect extends Module {
val io = IO(new Bundle {
val master = Decoupled(UInt(8.W))
val slave0 = Flipped(Decoupled(UInt(8.W)))
val slave1 = Flipped(Decoupled(UInt(8.W)))
val select = Input(Bool())
})
// Drive ready to a safe default; bits and valid are driven by the bulk
// connects in both branches, so every field of the wire is initialized.
val some_null_decoupled = Wire(Decoupled(UInt(8.W)))
some_null_decoupled.ready := false.B
when (io.select) {
io.slave0 <> io.master
io.slave1 <> some_null_decoupled
}.otherwise {
io.slave1 <> io.master
io.slave0 <> some_null_decoupled
}
}
object ConditionalBulkConnectTop extends App {
chisel3.Driver.execute(args, () => new ConditionalBulkConnect)
}
Does this help at all? Otherwise, can you provide more information, like the implementation of some_null_decoupled and the version of Chisel?
This is my JSON data. I am sending it to Kafka topics, reading it using a Spark RDD,
and saving it into Cassandra.
[{
"sensor": "swapSensor",
"sendtime": "2016-09-15T11:05:01.000Z",
"data": [{
"#context": "Context"
}]
}]
This is my Cassandra table:
CREATE TABLE IF NOT EXISTS event(sensor text,sendTime text,count bigint,entireJson text, PRIMARY KEY ((sensor)));
I want to push the entire raw JSON data into the table's entireJson column.
This is my code:
object StreamingData {
var count = 1
def main(args: Array[String]) {
val Array(brokers, topics, cassandraHost) = Array("1.11.22.50:9092", "c", "localhost")
def createSparkContext(): StreamingContext = {
val conf = new SparkConf()
.setAppName("c Events Processing")
.setMaster("local[2]")
.set("spark.cassandra.connection.host", cassandraHost)
.set("spark.cassandra.connection.keep_alive_ms", "60000") // prevent cassandra connection from being closed after every write
val sc = new SparkContext(conf)
// Create the context
val ssc = new StreamingContext(sc, Seconds(8))
val sqlContext = new SQLContext(sc);
// Kafka stream
val kafkaParams = Map[String, String]("metadata.broker.list" -> brokers)
val topicsSet = topics.split(",").toSet
val cEvents = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topicsSet).map(_._2)
cEvents.foreachRDD { rdd =>
count = count + 1
sqlContext.read.json(rdd).registerTempTable("eventTable")
val eventdf1 = sqlContext.sql("SELECT * FROM eventTable")
eventdf1.collect.foreach(println)
val eventdf = sqlContext.sql("SELECT sensor, sendtime,data.actor FROM eventTable")
eventdf.printSchema()
eventdf.map {
case (r) => (r.getString(0) + count, sendtime, count,eventdf1)
}
.saveToCassandra("c", "event", SomeColumns("sensor", "sendtime", "count","entireJson"))
}
ssc
}
}
Create one entity class and then map it to the table; using this you can directly store an RDD into the Cassandra database (see the sketch after the link).
Check the following link:
https://github.com/datastax/spark-cassandra-connector/blob/master/doc/6_advanced_mapper.md
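A minimal sketch of that idea, under assumptions: it reuses cEvents and the "c" keyspace / event table from the question, and the EventRow case class is hypothetical, with lowercase field names so the default column mapper lines up with the lowercase Cassandra column names. Extracting sensor and sendtime from the payload is omitted here.
import com.datastax.spark.connector._

// Hypothetical entity class; field names match the Cassandra columns.
case class EventRow(sensor: String, sendtime: String, count: Long, entirejson: String)

cEvents.foreachRDD { rdd =>
  // `raw` is the unparsed JSON string from Kafka; it is stored as-is in entirejson.
  val rows = rdd.map(raw => EventRow(sensor = "", sendtime = "", count = 1L, entirejson = raw))
  rows.saveToCassandra("c", "event")
}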
I tried this, and it works for saving rawdata into my Cassandra table's column:
var rawdata = ""
for (item <- rdd.collect().toArray) {
System.out.println(item);
rawdata = item
}
I have a selectable feature which is not normally required. However, to support this feature, some I/O ports need to be added to the original Module's I/O bundle.
I am doing it this way:
import Chisel._
class TestModule extends Module {
class IOBundle extends Bundle {
val i = Bool(INPUT)
val o = Bool(OUTPUT)
}
class IOBundle_EXT extends IOBundle {
val o_ext = Bool(OUTPUT)
}
val io = if(true) new IOBundle_EXT else new IOBundle;
io.o := io.i
io.o_ext := io.i
}
After running sbt "run TestModule --backend c --compile --test --genHarness", the compiler complains:
[error] xxxx/test/condi_port.scala:17: value o_ext is not a member of TestModule.this.IOBundle
[error] io.o_ext := io.i
[error] ^
[error] one error found
[error] (compile:compile) Compilation failed
So the if statement has no effect. val io is still typed as IOBundle, rather than the extended IOBundle_EXT, which makes no sense to me.
Chisel now supports Options in IO bundles.
As an example, I explored Options here (https://github.com/ucb-bar/riscv-boom/commit/da6edcb4b7bec341e31a55567ee04c8a1431d659), but here's a summary:
class MyBundle extends Bundle
{
   val my_ext = if (SOME_SWITCH) Some(new ExtBundle) else None
}
...
io.my_ext match
{
   case Some(b: ExtBundle) =>
      b.a := Bool(false)
      ...
   case _ => require (!SOME_SWITCH)
}
It's incredibly verbose, but I was able to get it working even when doing bulk connects and hiding bundles within bundles, etc.
Even though the compiler could determine that only one result is possible (the condition is always true), the type system sets the type of the result to the least upper bound, i.e. the most specific common supertype, of the two possible (true and false) branches.
You can verify this trivially with the following:
scala> val result = if (true) 1 else "one"
result: Any = 1
Try this:
import Chisel._
class TestModule(val useEXT : Boolean) extends Module {
class IOBundle extends Bundle {
val i = Bool(INPUT)
val o = Bool(OUTPUT)
}
class IOBundle_EXT extends IOBundle {
val o_ext = Bool(OUTPUT)
}
  val io = if (useEXT) {
    val res = new IOBundle_EXT
    res.o_ext := res.i
    res
  } else {
    new IOBundle
  }
io.o := io.i
}