Sending a message to a group from the ejabberd server

I send a message to a group from the ejabberd server, but I get:
Hook user_receive_packet crashed when running mod_mam:user_receive_packet
My send function:
send_message(Type, From, To, Subject, Body, StaticNumber) ->
    CodecOpts = ejabberd_config:codec_options(),
    try xmpp:decode(
          #xmlel{name = <<"message">>,
                 attrs = [{<<"to">>, To},
                          {<<"from">>, From},
                          {<<"type">>, Type},
                          {<<"id">>, p1_rand:get_string()}],
                 children =
                     [#xmlel{name = <<"subject">>,
                             children = [{xmlcdata, Subject}]},
                      #xmlel{name = <<"groupcontent">>,
                             attrs = [{<<"sendername">>, <<"Admin">>},
                                      {<<"acknowStatus">>, <<"0">>},
                                      {<<"fromadmin">>, StaticNumber}],
                             children = []},
                      #xmlel{name = <<"body">>,
                             children = [{xmlcdata, Body}]}]},
          ?NS_CLIENT, CodecOpts) of
        #message{from = JID} = Msg ->
            State = #{jid => JID},
            ejabberd_hooks:run_fold(user_send_packet, JID#jid.lserver, {Msg, State}, []),
            ejabberd_router:route(Msg)
    catch _:{xmpp_codec, Why} ->
            {error, xmpp:format_error(Why)}
    end.
Function call:
send_message("normal",
             list_to_binary("123456789@xmpp.designcafe.com"),
             list_to_binary("6ff3d0a4-c281-41bd-a262-c65bd767014d@mix.xmpp.designcafe.com"),
             list_to_binary("text"), <<"test">>, <<"123456789">>).
I could not fix the above issue.

send_message("normal",
Instead of "normal", you must provide groupchat as a binary, that is:
send_message(<<"groupchat">>,
What that change it works for me using ejabberd 22.05. It's important that From is an existing account, and it joined the MIX Channel. Of course the MIX Channel must exist too.
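Applying that to the call from the question (these are the question's own example JIDs), the corrected call would look like:
send_message(<<"groupchat">>,
             list_to_binary("123456789@xmpp.designcafe.com"),
             list_to_binary("6ff3d0a4-c281-41bd-a262-c65bd767014d@mix.xmpp.designcafe.com"),
             list_to_binary("text"), <<"test">>, <<"123456789">>).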

I am trying to create a HIT using an HTML file for Amazon MTurk

import boto3

# Make the MTurk client object (sandbox endpoint).
MTURK_SANDBOX = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
mturk = boto3.client('mturk',
    aws_access_key_id='<my-access-key-id>',          # redacted
    aws_secret_access_key='<my-secret-access-key>',  # redacted
    region_name='us-east-1',
    endpoint_url=MTURK_SANDBOX
)
questionfile = open("/home/nm6088/mturk files/prac4_Jun27/index.html", "r")
questions = questionfile.read()
localRequirements = [{
    'QualificationTypeId': '00000000000000000071',
    'Comparator': 'EqualTo',
    'LocaleValues': [{
        'Country': 'US'
    }],
    'RequiredToPreview': True
}]
hit = mturk.create_hit(
    Title='Write a simple version of the test',
    Description='A test HIT that requires the user to write a simple text.',
    Keywords='simple, qualification, test',
    Reward='0.01',
    MaxAssignments=1,
    LifetimeInSeconds=3600,
    AssignmentDurationInSeconds=600,
    AutoApprovalDelayInSeconds=200,
    Question=questions,
    QualificationRequirements=localRequirements
)
print("A new HIT has been created. You can preview it here:")
print("https://workersandbox.mturk.com/mturk/preview?groupId=" + hit['HIT']['HITGroupId'])
print("HITID = " + hit['HIT']['HITId'] + " (Use to Get Results)")
Running this fails with:
botocore.exceptions.ClientError: An error occurred (ParameterValidationError) when calling the CreateHIT operation: There was an error parsing the XML question or answer data in your request. Please make sure the data is well-formed and validates against the appropriate schema. Details: cvc-elt.1.a: Cannot find the declaration of element 'HTMLQuestion'. (1656352060543 s)
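No answer is attached to this post, but the error message points at the usual cause: the Question parameter must be an HTMLQuestion XML envelope that declares the HTMLQuestion schema namespace, not the bare HTML file contents. A minimal sketch of that wrapping (my addition, not from the original post):

# Wrap the raw HTML in the HTMLQuestion envelope that MTurk's schema expects.
# 'questions' is the file contents read above; the xmlns value must match exactly.
html_question = (
    '<HTMLQuestion xmlns="http://mechanicalturk.amazonaws.com/'
    'AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd">'
    '<HTMLContent><![CDATA[' + questions + ']]></HTMLContent>'
    '<FrameHeight>600</FrameHeight>'
    '</HTMLQuestion>'
)

Then pass Question=html_question to create_hit instead of the raw file contents. If index.html already contains this envelope, checking the xmlns attribute for typos would be the next step, since "Cannot find the declaration of element 'HTMLQuestion'" is exactly what a missing or misspelled namespace produces.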

ejabberd add_channel API for MIX

Currently, there is no API for creating a MIX channel, so I'm writing a custom module for it.
So far I have written the following code, but I'm not sure how to proceed further.
I would really appreciate someone's guidance here. Thanks in advance.
-module(mod_custom).
-behaviour(gen_mod).

-include("logger.hrl").

-export([start/2, stop/1, reload/3, mod_options/1,
         get_commands_spec/0, depends/2]).
-export([
    %% Create channel
    add_channel/3
]).

-include("ejabberd_commands.hrl").
-include("ejabberd_sm.hrl").
-include("xmpp.hrl").

start(_Host, _Opts) ->
    ejabberd_commands:register_commands(get_commands_spec()).

stop(Host) ->
    case gen_mod:is_loaded_elsewhere(Host, ?MODULE) of
        false ->
            ejabberd_commands:unregister_commands(get_commands_spec());
        true ->
            ok
    end.

reload(_Host, _NewOpts, _OldOpts) ->
    ok.

depends(_Host, _Opts) ->
    [].

get_commands_spec() ->
    [#ejabberd_commands{name = add_channel, tags = [group],
                        desc = "Create a WhatsApp like group",
                        module = ?MODULE, function = add_channel,
                        args = [{jid, binary}, {channel, binary}, {id, binary}],
                        args_example = [<<"admin@localhost">>, <<"testgroup123@localhost">>, <<"abc123456">>],
                        args_desc = ["Admin JID", "Channel JID", "Unique ID"],
                        result = {res, rescode}}].

add_channel(JID, Channel, ID) ->
    %% Create channel code goes here...
    ok.

mod_options(_) -> [].
Try something like this:
-module(mod_custom).
-behaviour(gen_mod).

-export([start/2, stop/1, reload/3, mod_options/1,
         get_commands_spec/0, depends/2]).
-export([create_channel/3]).

-include("logger.hrl").
-include("ejabberd_commands.hrl").
-include("ejabberd_sm.hrl").
-include_lib("xmpp/include/xmpp.hrl").

start(_Host, _Opts) ->
    ejabberd_commands:register_commands(get_commands_spec()).

stop(Host) ->
    case gen_mod:is_loaded_elsewhere(Host, ?MODULE) of
        false ->
            ejabberd_commands:unregister_commands(get_commands_spec());
        true ->
            ok
    end.

reload(_Host, _NewOpts, _OldOpts) ->
    ok.

depends(_Host, _Opts) ->
    [].

get_commands_spec() ->
    [#ejabberd_commands{name = create_channel, tags = [group],
                        desc = "Create a WhatsApp like group",
                        module = ?MODULE, function = create_channel,
                        args = [{from, binary},
                                {channel, binary},
                                {service, binary}],
                        args_example = [<<"admin@localhost">>,
                                        <<"testgroup123">>,
                                        <<"mix.localhost">>],
                        args_desc = ["From JID", "Channel Name", "MIX Service"],
                        result = {res, rescode}}].

create_channel(From, ChannelName, Service) ->
    try xmpp:decode(
          #xmlel{name = <<"iq">>,
                 attrs = [{<<"to">>, Service},
                          {<<"from">>, From},
                          {<<"type">>, <<"set">>},
                          {<<"id">>, p1_rand:get_string()}],
                 children =
                     [#xmlel{name = <<"create">>,
                             attrs = [{<<"channel">>, ChannelName},
                                      {<<"xmlns">>, ?NS_MIX_CORE_0}]}]},
          ?NS_CLIENT, []) of
        #iq{type = set} = Iq ->
            case mod_mix:process_mix_core(Iq) of
                #iq{type = result} ->
                    ok;
                _ ->
                    {error, unexpected_response}
            end
    catch _:{xmpp_codec, Why} ->
            {error, xmpp:format_error(Why)}
    end.

mod_options(_) -> [].
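Assuming the module is compiled and listed under modules in ejabberd.yml, the registered command should then be callable from the shell using the example arguments from the spec, e.g.:
ejabberdctl create_channel admin@localhost testgroup123 mix.localhost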

Confluent Kafka Python client Avro producer.produce() executes without error but no data in topic

My producer isn't throwing any errors, but data is not being sent to the destination topic. Can you recommend any techniques to debug this situation?
I call a Confluent Python Avro producer inside a synchronous loop to send data to a topic, like so:
self.producer.produce(topic="test2", value=msg_dict)
After this call I have a piece of code like so to flush the queue:
num_messages_in_queue = self.producer.flush(timeout=2.0)
print(f"flushed {num_messages_in_queue} messages from producer queue in iteration {num_iterations}")
This executes without any error, but no delivery callback fires afterwards either. My producer is initialized as follows:
def __init__(self, broker_url=None, topic=None, schema_registry_url=None, schema_path=None):
    try:
        with open(schema_path, 'r') as content_file:
            schema = avro.loads(content_file.read())
    except Exception as e:
        print(f"Error when trying to read avro schema file : {schema_path}")
    self.conf = {
        'bootstrap.servers': broker_url,
        'on_delivery': self.delivery_report,
        'schema.registry.url': schema_registry_url,
        'acks': -1,  # guarantees the record will not be lost as long as at least one in-sync replica remains alive
        'enable.idempotence': False,
        "error_cb": self.error_cb
    }
    self.topic = topic
    self.schema_path = schema_path
    self.producer = AvroProducer(self.conf, default_key_schema=schema, default_value_schema=schema)
My callback method is as follows:
def delivery_report(self, err, msg):
    print(f"began delivery_report")
    if err is None:
        print(f"delivery_report --> Delivered msg.value = {msg.value()} to topic= {msg.topic()} offset = {msg.offset()} without err.")
    else:
        print(f"conf_worker AvroProducer failed to deliver message {msg.value()} to topic {self.topic}. got error= {err}")
After this code is executed, I look at my topic on the schema registry container like so:
docker exec schema_registry_container kafka-avro-console-consumer --bootstrap-server kafka:29092 --topic test2 --from-beginning
I see this output:
[2020-04-03 15:48:38,064] INFO Registered kafka:type=kafka.Log4jController MBean
(kafka.utils.Log4jControllerRegistration$)
[2020-04-03 15:48:38,742]
INFO ConsumerConfig values:
auto.commit.interval.ms = 5000
auto.offset.reset = earliest
bootstrap.servers = [kafka:29092]
check.crcs = true
client.dns.lookup = default
client.id =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = console-consumer-49056
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
isolation.level = read_uncommitted
key.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 300000
max.poll.records = 500
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes = 65536
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
send.buffer.bytes = 131072
session.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = https
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.apache.kafka.common.serialization.ByteArrayDeserializer
(org.apache.kafka.clients.consumer.ConsumerConfig)
[2020-04-03 15:48:38,887] INFO Kafka version : 2.1.0-cp1 (org.apache.kafka.common.utils.AppInfoParser)
[2020-04-03 15:48:38,887] INFO Kafka commitId : bda8715f42a1a3db (org.apache.kafka.common.utils.AppInfoParser)
[2020-04-03 15:48:39,221] INFO Cluster ID: KHKziPBvRKiozobbwvP1Fw (org.apache.kafka.clients.Metadata)
[2020-04-03 15:48:39,224] INFO [Consumer clientId=consumer-1, groupId=console-consumer-49056] Discovered group coordinator kafka:29092 (id: 2147483646 rack: null) (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2020-04-03 15:48:39,231] INFO [Consumer clientId=consumer-1, groupId=console-consumer-49056] Revoking previously assigned partitions []
(org.apache.kafka.clients.consumer.internals.ConsumerCoordinator)
[2020-04-03 15:48:39,231] INFO [Consumer clientId=consumer-1, groupId=console-consumer-49056] (Re-)joining group (org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2020-04-03 15:48:42,264] INFO [Consumer clientId=consumer-1, groupId=console-consumer-49056] Successfully joined group with generation 1
(org.apache.kafka.clients.consumer.internals.AbstractCoordinator)
[2020-04-03 15:48:42,267] INFO [Consumer clientId=consumer-1, groupId=console-consumer-49056] Setting newly assigned partitions [test2-0] (org.apache.kafka.clients.consumer.internals.ConsumerCoordinator)
[2020-04-03 15:48:42,293] INFO [Consumer clientId=consumer-1, groupId=console-consumer-49056] Resetting offset for partition test2-0 to offset 0. (org.apache.kafka.clients.consumer.internals.Fetcher)
So the answer is so trivial that it's embarrassing!
But it does point to the fact that in a multilayered infrastructure, a single incorrectly set value can result in a silent failure that is very tedious to track down.
The issue came from an incorrectly set parameter in my docker-compose.yml file: the environment variable for broker_url was never set.
The application code needed this variable to reference the Kafka broker, but no exception was thrown for the missing parameter, and it failed silently.
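In hindsight there were two hints worth wiring into the code. First, Producer.flush() returns the number of messages still waiting in the queue, so the "flushed N messages" print above inverts its meaning: a persistently nonzero value means nothing ever left the client. Second, configuration with no usable default can be validated up front. A minimal fail-fast sketch (my addition, not from the original post):

def __init__(self, broker_url=None, topic=None, schema_registry_url=None, schema_path=None):
    # Fail fast instead of failing silently: none of these settings has a
    # usable default, so a missing one is a fatal configuration error.
    required = {"broker_url": broker_url, "topic": topic,
                "schema_registry_url": schema_registry_url,
                "schema_path": schema_path}
    missing = [name for name, value in required.items() if not value]
    if missing:
        raise ValueError(f"Missing required producer settings: {', '.join(missing)}")
    # ... continue with the schema loading and AvroProducer setup shown above ...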

Check_MK - Custom check params specified in WATO not being given to check function

I am working on a Check_MK plugin and can't seem to get the WATO-specified params passed to the check function when it runs, for one check in particular...
The check param rule shows in WATO.
It writes correct-looking values to rules.mk.
Clicking the "Analyze check parameters" icon from a host's service discovery shows the rule as active.
The check parameters displayed in service discovery show the title from the WATO file, so it seems like it is associating things correctly.
Running cmk -D <hostname> shows the check as always having the default values, though.
I have been staring at it for a while and am out of ideas.
Check_MK version: 1.2.8p21 Raw
Bulk of check file:
factory_settings["elasticsearch_status_default"] = {
    "min": (600, 300)
}

def inventory_elasticsearch_status(info):
    for line in info:
        yield restore_whitespace(line[0]), {}

def check_elasticsearch_status(item, params, info):
    for line in info:
        name = restore_whitespace(line[0])
        message = restore_whitespace(line[2])
        if name == item:
            return get_status_state(params["min"], name, line[1], message, line[3])

check_info['elasticsearch_status'] = {
    "inventory_function": inventory_elasticsearch_status,
    "check_function": check_elasticsearch_status,
    "service_description": "ElasticSearch Status %s",
    "default_levels_variable": "elasticsearch_status_default",
    "group": "elasticsearch_status",
    "has_perfdata": False
}
WATO file:
group = "checkparams"
#subgroup_applications = _("Applications, Processes & Services")

register_check_parameters(
    subgroup_applications,
    "elasticsearch_status",
    _("Elastic Search Status"),
    Dictionary(
        elements = [
            ("min",
             Tuple(
                 title = _("Minimum required status age"),
                 elements = [
                     Age(title = _("Warning if below"), default_value = 600),
                     Age(title = _("Critical if below"), default_value = 300),
                 ]
             ))
        ]
    ),
    None,
    match_type = "dict",
)
Entry in rules.mk from the WATO rule:
checkgroup_parameters.setdefault('elasticsearch_status', [])
checkgroup_parameters['elasticsearch_status'] = [
    ({'min': (3600, 1800)}, [], ALL_HOSTS),
] + checkgroup_parameters['elasticsearch_status']
Let me know if any other information would be helpful!
I posted the question here as well, and the mystery got solved.
I was matching the WATO rule to item None (the 5th positional arg in the WATO file), but since this check had multiple items inventoried under it (none of which had the ID None), the rule was applying to the host but not to any of the specific service checks.
The fix was to replace that param with:
TextAscii(title = _("Status Description"), allow_empty = True),
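For reference, the corrected registration would look like this (identical to the WATO file above, with only the 5th argument changed from None):

register_check_parameters(
    subgroup_applications,
    "elasticsearch_status",
    _("Elastic Search Status"),
    Dictionary(
        elements = [
            ("min",
             Tuple(
                 title = _("Minimum required status age"),
                 elements = [
                     Age(title = _("Warning if below"), default_value = 600),
                     Age(title = _("Critical if below"), default_value = 300),
                 ]
             ))
        ]
    ),
    TextAscii(title = _("Status Description"), allow_empty = True),  # was: None
    match_type = "dict",
)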

Chain HTTP requests and merge JSON responses in Elm

I've succeeded in triggering a simple HTTP request in Elm and decoding the JSON response into an Elm value - https://stackoverflow.com/questions/43139316/decode-nested-variable-length-json-in-elm
The problem I'm facing now:
How do I chain two HTTP requests (concurrency preferred) and merge the JSON into my new (updated) model? Note - please see the updated Commands.elm.
Package used to access remote data - krisajenkins/remotedata http://package.elm-lang.org/packages/krisajenkins/remotedata/4.3.0/RemoteData
Github repo of my code - https://github.com/areai51/my-india-elm
Previous working code:
Models.elm
type alias Model =
    { leaders : WebData (List Leader)
    }

initialModel : Model
initialModel =
    { leaders = RemoteData.Loading
    }
Main.elm
init : ( Model, Cmd Msg )
init =
    ( initialModel, fetchLeaders )
Commands.elm
fetchLeaders : Cmd Msg
fetchLeaders =
    Http.get fetchLeadersUrl leadersDecoder
        |> RemoteData.sendRequest
        |> Cmd.map Msgs.OnFetchLeaders

fetchLeadersUrl : String
fetchLeadersUrl =
    "https://data.gov.in/node/85987/datastore/export/json"
Msgs.elm
type Msg
    = OnFetchLeaders (WebData (List Leader))
Update.elm
update msg model =
    case msg of
        Msgs.OnFetchLeaders response ->
            ( { model | leaders = response }, Cmd.none )
Updated code (I need help with Commands.elm):
Models.elm
type alias Model =
    { lsLeaders : WebData (List Leader)
    , rsLeaders : WebData (List Leader) -- <------------- updated model
    }

initialModel : Model
initialModel =
    { lsLeaders = RemoteData.Loading
    , rsLeaders = RemoteData.Loading
    }
Main.elm
init : ( Model, Cmd Msg )
init =
    ( initialModel, fetchLeaders )
Commands.elm
fetchLeaders : Cmd Msg
fetchLeaders = -- <-------- How do I call both requests here and fire separate msgs?
    Http.get fetchLSLeadersUrl lsLeadersDecoder -- <----- there will be a different decoder named rsLeadersDecoder
        |> RemoteData.sendRequest
        |> Cmd.map Msgs.OnFetchLSLeaders

fetchLSLeadersUrl : String
fetchLSLeadersUrl =
    "https://data.gov.in/node/85987/datastore/export/json"

fetchRSLeadersUrl : String -- <------------------ new data source
fetchRSLeadersUrl =
    "https://data.gov.in/node/982241/datastore/export/json"
Msgs.elm
type Msg
    = OnFetchLSLeaders (WebData (List Leader))
    | OnFetchRSLeaders (WebData (List Leader)) -- <-------- new message
Update.elm
update msg model =
    case msg of
        Msgs.OnFetchLSLeaders response ->
            ( { model | lsLeaders = response }, Cmd.none )

        Msgs.OnFetchRSLeaders response -> -- <--------- new handler
            ( { model | rsLeaders = response }, Cmd.none )
The way to fire off two concurrent requests is to use Cmd.batch:
init : ( Model, Cmd Msg )
init =
    ( initialModel, Cmd.batch [ fetchLSLeaders, fetchRSLeaders ] )
There is no guarantee on which request will return first, and there is no guarantee that both will be successful; one could fail while the other succeeds, for example.
You mention that you want to merge the results, but you didn't say how the merge should work, so I'll assume you want to append the two lists of leaders into a single list, and that it will be more useful for your application to deal with a single RemoteData value rather than several.
You can merge multiple RemoteData values together with a custom function using map and andMap.
mergeLeaders : WebData (List Leader) -> WebData (List Leader) -> WebData (List Leader)
mergeLeaders a b =
    RemoteData.map List.append a
        |> RemoteData.andMap b
Notice that I'm using List.append there. That can really be any function that takes two lists and merges them however you please.
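As a usage sketch (my addition, not from the original answer), the two model fields from the question could be combined wherever the view needs a single list:
allLeaders : Model -> WebData (List Leader)
allLeaders model =
    mergeLeaders model.lsLeaders model.rsLeaders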
If you prefer an applicative style of programming, the above could be translated to the following infix version:
import RemoteData.Infix exposing (..)

mergeLeaders2 : WebData (List Leader) -> WebData (List Leader) -> WebData (List Leader)
mergeLeaders2 a b =
    List.append <$> a <*> b
According to the documentation on andMap (which uses a result tuple rather than an appended list in its example):
The final tuple succeeds only if all its children succeeded. It is still Loading if any of its children are still Loading. And if any child fails, the error is the leftmost Failure value.