I'm trying my hand at tcpdump.
The eventual tcpdump command looks like this:
sudo tcpdump -i eth0 -n -tt 'tcp port 6379 or port 5432 and (((ip[2:2] - ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0)'
My understanding is that I'm filtering ports 6379 and 5432 for packets that contain data, using (((ip[2:2] - ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0), i.e. the IP total length minus the IP header length minus the TCP header length (the TCP payload size) must be non-zero.
When I run this I see the following output:
1521257077.232079 IP 10.240.0.40.37978 > 10.240.0.9.6379: Flags [P.], seq 1142349306:1142349326, ack 4173563637, win 222, options [nop,nop,TS val 1441301100 ecr 2758188018], length 20: RESP "del" "a"
1521257077.234193 IP 10.240.0.9.6379 > 10.240.0.40.37978: Flags [P.], seq 1:5, ack 20, win 220, options [nop,nop,TS val 2758213022 ecr 1441301100], length 4: RESP "0"
1521257100.633083 IP 10.240.0.40.37978 > 10.240.0.9.6379: Flags [P.], seq 20:40, ack 5, win 222, options [nop,nop,TS val 1441306950 ecr 2758213022], length 20: RESP "del" "a"
1521257100.634825 IP 10.240.0.9.6379 > 10.240.0.40.37978: Flags [P.], seq 5:9, ack 40, win 220, options [nop,nop,TS val 2758218872 ecr 1441306950], length 4: RESP "0"
My goal is to correctly match the TCP request with the appropriate response.
I can draw some inference from the seq and ack numbers, as here:
1521257100.633083 IP 10.240.0.40.37978 > 10.240.0.9.6379: Flags [P.], seq 20:40, ack 5, win 222, options [nop,nop,TS val 1441306950 ecr 2758213022], length 20: RESP "del" "a"
1521257100.634825 IP 10.240.0.9.6379 > 10.240.0.40.37978: Flags [P.], seq 5:9, ack 40, win 220, options [nop,nop,TS val 2758218872 ecr 1441306950], length 4: RESP "0"
I see ack 40 for seq 20:40, but the problem is that I can't draw the same inference for the first packet sent (at the start of the connection):
1521257077.232079 IP 10.240.0.40.37978 > 10.240.0.9.6379: Flags [P.], seq 1142349306:1142349326, ack 4173563637, win 222, options [nop,nop,TS val 1441301100 ecr 2758188018], length 20: RESP "del" "a"
1521257077.234193 IP 10.240.0.9.6379 > 10.240.0.40.37978: Flags [P.], seq 1:5, ack 20, win 220, options [nop,nop,TS val 2758213022 ecr 1441301100], length 4: RESP "0"
My only guess is to look at the TCP timestamp values (TS val and ecr).
How can I achieve that?
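The property I plan to rely on is that a response's ack equals the request's ending seq. Below is a rough sketch of that idea (my own attempt, not a verified solution). It assumes tcpdump is run with the extra -S flag, so both directions print absolute sequence numbers and the arithmetic also holds for the first packet of a connection, and it only handles lines in the exact format shown above.

# pair_resp.py - rough sketch: pair each tcpdump line with the earlier
# line it acknowledges. Run as:
#   sudo tcpdump -i eth0 -n -tt -S '...' | python3 pair_resp.py
import re
import sys

LINE = re.compile(
    r"(?P<ts>\S+) IP (?P<src>\S+) > (?P<dst>\S+): Flags \[[^\]]*\], "
    r"seq (?P<seq_start>\d+):(?P<seq_end>\d+), ack (?P<ack>\d+)")

pending = {}  # (src, dst, seq_end) -> line waiting to be acknowledged

for line in sys.stdin:
    m = LINE.match(line)
    if m is None:
        continue
    src, dst = m.group("src"), m.group("dst")
    # does this segment acknowledge a segment we have already seen?
    key = (dst, src, int(m.group("ack")))
    if key in pending:
        print("request :", pending.pop(key).rstrip())
        print("response:", line.rstrip())
    # remember this segment so its own acknowledgement can be paired later
    pending[(src, dst, int(m.group("seq_end")))] = line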
One of the things the code below does is put student IDs into a TinyDB database after checking whether the new ID is already present.
The code is below -
#enroll.py
# USAGE
# python enroll.py --id S1901 --name somename --conf config/config.json
# import the necessary packages
from pyimagesearch.utils import Conf
from imutils.video import VideoStream
from tinydb import TinyDB
from tinydb import where
import face_recognition
import argparse
import imutils
import pyttsx3
import time
import cv2
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--id", required=True,
help="Unique student ID of the student")
ap.add_argument("-n", "--name", required=True,
help="Name of the student")
ap.add_argument("-c", "--conf", required=True,
help="Path to the input configuration file")
args = vars(ap.parse_args())
# load the configuration file
conf = Conf(args["conf"])
# initialize the database and student table objects
db = TinyDB(conf["db_path"])
studentTable = db.table("student")
# retrieve student details from the database
student = studentTable.search(where(args["id"]))
# check if an entry for the student id does *not* exist, if so, then
# enroll the student
if len(student) == 0:
# initialize the video stream and allow the camera sensor to warmup
print("[INFO] warming up camera...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# initialize the number of face detections and the total number
# of images saved to disk
faceCount = 0
total = 0
# ask the student to stand in front of the camera
print("{} please stand in front of the camera until you" \
"receive further instructions".format(args["name"]))
# initialize the status as detecting
status = "detecting"
# create the directory to store the student's data
os.makedirs(os.path.join(conf["dataset_path"], conf["class"],
args["id"]), exist_ok=True)
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream, resize it (so
# face detection will run faster), flip it horizontally, and
# finally clone the frame (just in case we want to write the
# frame to disk later)
frame = vs.read()
frame = imutils.resize(frame, width=400)
frame = cv2.flip(frame, 1)
orig = frame.copy()
# convert the frame from from RGB (OpenCV ordering) to dlib
# ordering (RGB) and detect the (x, y)-coordinates of the
# bounding boxes corresponding to each face in the input image
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
boxes = face_recognition.face_locations(rgb,
model=conf["detection_method"])
# loop over the face detections
for (top, right, bottom, left) in boxes:
# draw the face detections on the frame
cv2.rectangle(frame, (left, top), (right, bottom),
(0, 255, 0), 2)
# check if the total number of face detections are less
# than the threshold, if so, then skip the iteration
if faceCount < conf["n_face_detection"]:
# increment the detected face count and set the
# status as detecting face
faceCount += 1
status = "detecting"
continue
# save the frame to correct path and increment the total
# number of images saved
p = os.path.join(conf["dataset_path"], conf["class"],
args["id"], "{}.png".format(str(total).zfill(5)))
cv2.imwrite(p, orig[top:bottom, left:right])
total += 1
# set the status as saving frame
status = "saving"
# draw the status on to the frame
cv2.putText(frame, "Status: {}".format(status), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
# show the output frame
cv2.imshow("Frame", frame)
cv2.waitKey(1)
# if the required number of faces are saved then break out from
# the loop
if total == conf["face_count"]:
# let the student know that face enrolling is over
print("Thank you {} you are now enrolled in the {} " \
"class.".format(args["name"], conf["class"]))
break
# insert the student details into the database
studentTable.insert({args["id"]: [args["name"], "enrolled"]})
# print the total faces saved and do a bit of cleanup
print("[INFO] {} face images stored".format(total))
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
vs.stop()
# otherwise, a entry for the student id exists
else:
# get the name of the student
name = student[0][args["id"]][0]
print("[INFO] {} has already already been enrolled...".format(
name))
# close the database
db.close()
ISSUE:
When I run this code for the first time, everything works fine.
>> python3 enroll.py --id S1111 --name thor --conf config/config.json
I get my ID in my json file as shown below -
{"student": {"1": {"S1111": ["thor", "enrolled"]}}}
But when I try to add another ID -
python3 enroll.py --id S1112 --name hulk --conf config/config.json
I get the following error -
ERROR:
Traceback (most recent call last):
  File "enroll.py", line 35, in <module>
    student = studentTable.search(where(args["id"]))
  File "/usr/lib/python3.5/site-packages/tinydb/table.py", line 222, in search
    docs = [doc for doc in self if cond(doc)]
  File "/usr/lib/python3.5/site-packages/tinydb/table.py", line 222, in <listcomp>
    docs = [doc for doc in self if cond(doc)]
  File "/usr/lib/python3.5/site-packages/tinydb/queries.py", line 59, in __call__
    return self._test(value)
  File "/usr/lib/python3.5/site-packages/tinydb/queries.py", line 136, in notest
    raise RuntimeError('Empty query was evaluated')
RuntimeError: Empty query was evaluated
If I change my table name from student to something else, then again it stores an ID only the first time and then gives the same error. I'm not sure what's wrong here.
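One detail stands out in the traceback: where(args["id"]) on its own builds a query for that key with no test attached, and TinyDB raises "Empty query was evaluated" the first time such a query is applied to a document. That would also explain why the very first run works: the table is still empty, so the query is never evaluated against anything. A possible fix, assuming the intent is "does any document contain this ID as a key":

# Possible fix (assumes the goal is to check whether any document in the
# table already uses args["id"] as a key): attach an .exists() test so
# the query is no longer empty.
student = studentTable.search(where(args["id"]).exists())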
I have configured a standalone cluster (one node with 32 GB and 32 cores) with 2 workers of 16 cores and 10 GB memory each. The JSON file I'm reading is only 6 GB. I have tried tweaking different configurations, including increasing young-generation memory. Is there anything else I'm missing?
PS: This doesn't work even in spark-shell.
Configurations inside the env file:
SPARK_EXECUTOR_MEMORY=8g
SPARK_WORKER_CORES=16
SPARK_WORKER_INSTANCES=2
SPARK_WORKER_MEMORY=10g
I have tried the following config params while submitting the job:
./spark-submit --driver-memory 4g \
  --supervise --verbose \
  --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
  --conf spark.memory.fraction=0.2 \
  --conf spark.memory.storageFraction=0.5 \
  --conf spark.network.timeout=10000000 \
  --conf spark.executor.heartbeatInterval=10000000
val dataFrame = spark.read.option("multiline", "true")
  .schema(schema)
  .json(jsonFilePath)
  .persist(StorageLevel.MEMORY_ONLY_SER_2)

dataFrame.createOrReplaceTempView("node")

val df = spark.sqlContext.sql("select col1, col2.x, col3 from node")
  .persist(StorageLevel.MEMORY_ONLY_SER_2)

val newrdd = df.repartition(64)
  .rdd.mapPartitions(partition => {
    val newPartition = partition.map(x => {
      //somefunction()
    }).toList
    newPartition.iterator
  }).persist(StorageLevel.MEMORY_ONLY_SER_2)
Error:
Lost task 0.0 in stage 0.0 (TID 0, 131.123.39.101, executor 0): java.lang.OutOfMemoryError: GC overhead limit exceeded
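One suspicion (unverified): with multiline set to true, Spark cannot split the file, so the whole 6 GB document is parsed in a single task, and MEMORY_ONLY_SER_2 additionally keeps two serialized replicas of every cached partition. Here is a sketch of the lighter-weight variant I have in mind, written in PySpark for brevity (schema and json_file_path stand in for the values used above):

# sketch only; `schema` and `json_file_path` are placeholders
from pyspark.sql import SparkSession
from pyspark import StorageLevel

spark = (SparkSession.builder
         .appName("json-load-sketch")
         .config("spark.executor.memory", "8g")  # match SPARK_EXECUTOR_MEMORY
         .getOrCreate())

df = (spark.read
      # a single multiline JSON document cannot be split across tasks,
      # so this option has to stay if the file really is one document
      .option("multiline", "true")
      .schema(schema)
      .json(json_file_path))

# spread the data over the cluster right after the read, and let the
# cache spill to disk instead of hitting GC limits; MEMORY_AND_DISK also
# avoids the 2x replication cost of MEMORY_ONLY_SER_2
df = df.repartition(64).persist(StorageLevel.MEMORY_AND_DISK)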
Thanks in advance
I have buttons that are connected to keys in my json file. Using Kivy's Clock module, I check the value connected to each button and change its color. When a button turns red, I want its score value to be set to 0 and stored in the json file. When I have multiple buttons saved in the json file, the score doesn't turn zero until I press one of the buttons, and when I do press a button, all the values except the first button's turn 0, which is not what I want.
class MainApp(App):
    def build(self):  # build() returns an instance
        self.store = JsonStore("streak.json")  # file that stores the streaks
        Clock.schedule_interval(self.check_streak, 1 / 30.)
        return presentation

    def check_streak(self, dt):
        for child in reversed(self.root.screen_two.ids.streak_zone.children):
            name = child.text
            with open("streak.json", "r") as read_file:
                data = json.load(read_file)
            for key in data.keys():
                if key == name:
                    delay = data.get(key, {}).get('delay')  # get value of nested key 'delay'
                    self.honey = data[key]['delta']
                    float(self.honey)
                    if delay > time.time() < self.honey:  # early (yellow)
                        child.background_normal = ''
                        child.background_color = [1, 1, 0, 1]
                        child.unbind(on_press=self.add_score)
                        child.bind(on_press=self.early_click)
                    elif delay > time.time() > self.honey:  # on time (green)
                        child.background_normal = ''
                        child.background_color = [0, 1, 0, 1]
                        child.unbind(on_press=self.early_click)
                        child.bind(on_press=self.add_score)
                    elif delay < time.time() > self.honey:  # late (red)
                        child.background_normal = ''
                        child.background_color = [1, 0, 0, 1]
                        child.unbind(on_press=self.add_score)
                        child.unbind(on_press=self.early_click)
                        with open("streak.json", "r+") as f:
                            files = json.load(f)
                            files[child.text]['score'] = 0
                            f.seek(0)
                            json.dump(files, f, indent=4)
                            f.truncate()
json file:
{
    "One": {
        "action": "One",
        "delay": 1558740875.58999,
        "seconds": 60,
        "score": 3,
        "delta": 1558740815.58999,
        "grace_sec": 120
    },
    "Two": {
        "action": "Two",
        "delay": 1558740752.0085213,
        "seconds": 60,
        "score": 0,
        "delta": 1558740692.0085213,
        "grace_sec": 120
    },
    "Three": {
        "action": "Three",
        "delay": 1558746820.4364505,
        "seconds": 60,
        "score": 0,
        "delta": 1558740820.4364505,
        "grace_sec": 6060
    }
}
I want only the button that is red to change its score to 0, but both Two and Three change, even though only Two was red. Also, the score only changes when I press a green button (in this case One). This is not what I want; I want the score to update itself via the Clock module.
Case 1: I think time.time() is always greater than both delay and honey, so it enters the third condition every time and sets the score to zero.
Another option: just keep track of all the buttons that are red in a list, iterate over them, and update the json file in one pass, as in the sketch below.
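Here is a rough sketch of that batched variant (reset_red_scores is a hypothetical helper; it assumes the same streak.json layout as above and the solid red [1, 0, 0, 1] that check_streak sets):

# hypothetical helper: collect every red button first, then rewrite the
# json file once instead of once per button
def reset_red_scores(self):
    red = [child.text
           for child in self.root.screen_two.ids.streak_zone.children
           if child.background_color == [1, 0, 0, 1]]
    if not red:
        return
    with open("streak.json", "r+") as f:
        data = json.load(f)
        for name in red:
            data[name]["score"] = 0
        f.seek(0)
        json.dump(data, f, indent=4)
        f.truncate()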
I created a separate function that uses Clock to update the json file:
# change score to 0 and store it in the json file
def score_gone(self, dt):
    for child in self.root.screen_two.ids.streak_zone.children:
        name = child.text
        color = child.background_color
        with open("streak.json", "r") as file:
            read = json.load(file)
        if color == [1, 0, 0, .95]:  # red
            if read[name]['score'] != 0:  # stops slowdown from Clock
                with open("streak.json", "r+") as f:  # fix score not resetting to 0
                    data = json.load(f)
                    data[name]['score'] = 0
                    f.seek(0)
                    json.dump(data, f, indent=4)
                    f.truncate()
            elif read[name]['score'] == 0:  # stops slowdown from Clock
                pass
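Presumably this is scheduled next to check_streak in build(), along these lines (the one-second interval is an arbitrary choice):

# in build(), next to the existing schedule
Clock.schedule_interval(self.check_streak, 1 / 30.)
Clock.schedule_interval(self.score_gone, 1.)  # reset red buttons' scores

One thing I notice: score_gone compares against [1, 0, 0, .95], while check_streak sets red to [1, 0, 0, 1], so the equality test will only fire if the alpha really is .95.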
Json4s decoding frequently scrambles the order of the elements in a JObject when decoding into a HashMap, so I tried decoding into a ListMap instead. However, there seems to be no way of doing this; when I run the following simple program:
val v: ListMap[String, Int] = ListMap("a" -> 1, "b" -> 2)
val json = JsonMethods.compact(Extraction.decompose(v))
val v2 = Extraction.extract[ListMap[String, Int]](JsonMethods.parse(json))
assert(v == v2)
The following error message was thrown:
scala.collection.immutable.Map$Map2 cannot be cast to scala.collection.immutable.ListMap
java.lang.ClassCastException: scala.collection.immutable.Map$Map2 cannot be cast to scala.collection.immutable.ListMap
Is there an easy way to fix this? Or should I switch to more recent Json libraries (Argonaut/Circe) instead?
No, you can't do this. At least not this way. According to the JSON spec:
An object is an unordered set of name/value pairs.
And all the standard libraries treat it that way, which means the order is already scrambled when you (or the library) do the initial parsing into the intermediate data structure. Moreover, you can't even guarantee that the JSON will be {"a":1, "b":2} rather than {"b":2, "a":1}.
The only way to preserve the order is to store it inside the JSON in a form that enforces order, and the only such construct is an ordered list of values, i.e. an array. So you can do something like this:
val v: ListMap[String, Int] = ListMap("c" -> 1, "AaAa" -> 2, "BBBB" -> 3, "AaBB" -> 4, "BBAa" -> 5)
val jsonBad = JsonMethods.compact(Extraction.decompose(v))
val bad = Extraction.extract[Map[String, Int]](JsonMethods.parse(jsonBad))
val jsonGood = JsonMethods.compact(Extraction.decompose(v.toList))
val good = ListMap(Extraction.extract[List[(String, Int)]](JsonMethods.parse(jsonGood)): _*)
println(s"'$jsonBad' => $bad")
println(s"'$jsonGood' => $good")
Which prints
'{"c":1,"AaAa":2,"BBBB":3,"AaBB":4,"BBAa":5}' => Map(AaAa -> 2, BBBB -> 3, AaBB -> 4, BBAa -> 5, c -> 1)
'[{"c":1},{"AaAa":2},{"BBBB":3},{"AaBB":4},{"BBAa":5}]' => ListMap(c -> 1, AaAa -> 2, BBBB -> 3, AaBB -> 4, BBAa -> 5)
Here is a library which supports all Scala collections, so you can parse and serialize to/from ListMap easily; it also serializes case class fields in stable declaration order:
libraryDependencies ++= Seq(
  "com.github.plokhotnyuk.jsoniter-scala" %% "jsoniter-scala-core" % "0.29.2" % Compile,
  "com.github.plokhotnyuk.jsoniter-scala" %% "jsoniter-scala-macros" % "0.29.2" % Provided // required only in compile-time
)
import com.github.plokhotnyuk.jsoniter_scala.macros._
import com.github.plokhotnyuk.jsoniter_scala.core._
val codec = JsonCodecMaker.make[ListMap[String, Int]](CodecMakerConfig())
val v: ListMap[String, Int] = ListMap("a" -> 1, "b" -> 2, "c" -> 3, "d" -> 4, "e" -> 5)
val json = writeToArray(codec, v)
val v2 = readFromArray(codec, json)
require(v == v2)
When I try to decode an SMB header with ctypes' from_buffer_copy, it raises an error. I've defined fields totaling 32 bytes, so why does Python show ValueError: Buffer size too small (32 instead of at least 40 bytes)?
Environment: Mac OS X 64-bit, Linux
# python2.7 smbproto.py
Traceback (most recent call last):
File "smbproto.py", line 77, in <module>
SMB_HEADER(data)
File "smbproto.py", line 39, in __new__
return self.from_buffer_copy(buffer)
ValueError: Buffer size too small (32 instead of at least 40 bytes)
Code here.
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ctypes import *
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__file__)
class SMB_HEADER(Structure):

    _fields_ = [
        ("server_component", c_uint32),
        ("smb_command", c_uint8),
        ("error_class", c_uint8),
        ("reserved1", c_uint8),
        ("error_code", c_uint16),
        ("flags", c_uint8),
        ("flags2", c_uint16),
        ("process_id_high", c_uint16),
        ("signature", c_uint64),
        ("reserved2", c_uint16),
        ("tree_id", c_uint16),
        ("process_id", c_uint16),
        ("user_id", c_uint16),
        ("multiplex_id", c_uint16)
    ]

    def __new__(self, buffer=None):
        return self.from_buffer_copy(buffer)

    def __init__(self, buffer):
        print("%04x" % self.server_component)
        print("%01x" % self.smb_command)
        print("%01x" % self.error_class)
        print("%01x" % self.reserved1)
        print("%02x" % self.error_code)
        print("%01x" % self.flags)
        print("%02x" % self.flags2)
        print("%02x" % self.process_id_high)
        print("%08x" % self.signature)
        print("%02x" % self.reserved2)
        print("%02x" % self.tree_id)
        print("%02x" % self.process_id)
        print("%02x" % self.user_id)
        print("%02x" % self.multiplex_id)


if __name__ == '__main__':
    data = (
        '\xffSMB'  # server_component
        's'  # smb_command
        '\x00'  # error_class
        '\x00'  # reserved1
        '\x00\x00'  # error_code
        '\x98'  # flags
        '\x01 '  # flags2
        '\x00\x00'  # process_id_high
        '\x00\x00\x00\x00\x00\x00\x00\x00'  # signature
        '\x00\x00'  # reserved2
        '\x00\x00'  # tree_id
        '/K'  # process_id
        '\x00\x10'  # user_id
        '\xc5^'  # multiplex_id
    )
    SMB_HEADER(data)
You've got too much in your header struct. The spec defines SMB_Header as:
SMB_Header
{
    UCHAR Protocol[4];
    UCHAR Command;
    SMB_ERROR Status;
    UCHAR Flags;
    USHORT Flags2;
    USHORT PIDHigh;
    UCHAR SecurityFeatures[8];
    USHORT Reserved;
    USHORT TID;
    USHORT PIDLow;
    USHORT UID;
    USHORT MID;
}
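It is also worth noting where the 40 comes from: with ctypes' default native alignment, the c_uint64 signature field is aligned to an 8-byte boundary, so the structure is padded to 40 bytes even though the declared fields add up to 32. If the intent is the packed 32-byte wire format, _pack_ = 1 removes the padding. A minimal sketch (PackedSMBHeader is a hypothetical name, not from the question):

# same fields, but with _pack_ = 1 so ctypes inserts no alignment padding;
# sizeof() then reports 32 instead of 40 and from_buffer_copy() accepts
# the 32-byte header
from ctypes import Structure, c_uint8, c_uint16, c_uint32, c_uint64, sizeof

class PackedSMBHeader(Structure):
    _pack_ = 1  # byte-align every field
    _fields_ = [
        ("server_component", c_uint32),
        ("smb_command", c_uint8),
        ("error_class", c_uint8),
        ("reserved1", c_uint8),
        ("error_code", c_uint16),
        ("flags", c_uint8),
        ("flags2", c_uint16),
        ("process_id_high", c_uint16),
        ("signature", c_uint64),
        ("reserved2", c_uint16),
        ("tree_id", c_uint16),
        ("process_id", c_uint16),
        ("user_id", c_uint16),
        ("multiplex_id", c_uint16),
    ]

assert sizeof(PackedSMBHeader) == 32  # no 8-byte alignment of `signature`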