Not Found exception from smallrye-kafka producer in Quarkus application - kafka-producer-api

I have a simple POC built on Quarkus to send an Avro message to a Kafka topic.
Below are the configurations that were loaded:
acks = 1
batch.size = 16384
bootstrap.servers = [kaas-test-dc-a.company.com:443]
buffer.memory = 33554432
client.dns.lookup = use_all_dns_ips
client.id = kafka-producer-cpe-reply
compression.type = none
connections.max.idle.ms = 540000
delivery.timeout.ms = 120000
enable.idempotence = false
interceptor.classes = []
internal.auto.downgrade.txn.commit = false
key.serializer = class org.apache.kafka.common.serialization.StringSerializer
linger.ms = 0
max.block.ms = 60000
max.in.flight.requests.per.connection = 5
max.request.size = 1048576
metadata.max.age.ms = 300000
metadata.max.idle.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
receive.buffer.bytes = 32768
reconnect.backoff.max.ms = 10000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retries = 2147483647
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism = GSSAPI
security.protocol = SSL
security.providers = null
send.buffer.bytes = 131072
socket.connection.setup.timeout.max.ms = 127000
socket.connection.setup.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
ssl.endpoint.identification.algorithm = https
ssl.engine.factory.class = null
ssl.key.password = [hidden]
ssl.keymanager.algorithm = SunX509
ssl.keystore.certificate.chain = null
ssl.keystore.key = null
ssl.keystore.location = C:/keystore.jks
ssl.keystore.password = [hidden]
ssl.keystore.type = JKS
ssl.protocol = TLSv1.3
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.certificates = null
ssl.truststore.location = C:/truststore.jks
ssl.truststore.password = [hidden]
ssl.truststore.type = JKS
transaction.timeout.ms = 60000
transactional.id = null
value.serializer = class io.apicurio.registry.utils.serde.AvroKafkaSerializer
Below is the error I am seeing:
2021-06-29 23:36:59,012 INFO [org.apa.kaf.com.uti.AppInfoParser] (Quarkus Main Thread) Kafka version: 2.7.0
2021-06-29 23:36:59,012 INFO [org.apa.kaf.com.uti.AppInfoParser] (Quarkus Main Thread) Kafka commitId: 448719dc99a19793
2021-06-29 23:36:59,015 INFO [org.apa.kaf.com.uti.AppInfoParser] (Quarkus Main Thread) Kafka startTimeMs: 1625024218998
2021-06-29 23:36:59,757 INFO [io.quarkus] (Quarkus Main Thread) aru-unet-Service-replyws 1.0.0 on JVM (powered by Quarkus 1.13.7.Final) started in 33.944s. Listening on: http://localhost:8080
2021-06-29 23:36:59,758 INFO [io.quarkus] (Quarkus Main Thread) Profile dev activated. Live Coding activated.
2021-06-29 23:36:59,761 INFO [io.quarkus] (Quarkus Main Thread) Installed features: [camel-support-retrofit, cdi, kotlin, mutiny, quarkiverse-apicurio-registry-client, resteasy, resteasy-jackson, smallrye-context-propagation, smallrye-openapi, smallrye-reactive-messaging, smallrye-reactive-messaging-kafka, swagger-ui, vertx]
2021-06-29 23:37:00,049 INFO [org.apa.kaf.cli.Metadata] (kafka-producer-network-thread | kafka-producer-cpe-reply) [Producer clientId=kafka-producer-cpe-reply] Cluster ID: 6cbv7QOaSW6j1vXrOCE4jA
2021-06-29 23:40:09,465 INFO [ServiceResponseServices] (executor-thread-1) Sending response string to Kafka
2021-06-29 23:40:10,206 ERROR [io.sma.rea.mes.kafka] (vert.x-eventloop-thread-14) SRMSG18206: Unable to write to Kafka from channel cpe-reply (topic: kaas.aru-unet.dev.prcngevts.v1): javax.ws.rs.WebApplicationException: Not Found
at io.apicurio.registry.client.request.RequestHandler$ResultCallback.onResponse(RequestHandler.java:38)
at retrofit2.OkHttpCall$1.onResponse(OkHttpCall.java:129)
at okhttp3.RealCall$AsyncCall.execute(RealCall.java:174)
at okhttp3.internal.NamedRunnable.run(NamedRunnable.java:32)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base/java.lang.Thread.run(Thread.java:829)
I would appreciate some help with the exception above.
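For reference, here is roughly how the channel is declared in application.properties. This is a sketch rather than my exact file: the channel name, topic, and serializers come from the log and config dump above, while the registry URL is a placeholder and the last property is an assumption based on the Apicurio 1.x serde:

mp.messaging.outgoing.cpe-reply.connector=smallrye-kafka
mp.messaging.outgoing.cpe-reply.topic=kaas.aru-unet.dev.prcngevts.v1
mp.messaging.outgoing.cpe-reply.key.serializer=org.apache.kafka.common.serialization.StringSerializer
mp.messaging.outgoing.cpe-reply.value.serializer=io.apicurio.registry.utils.serde.AvroKafkaSerializer
mp.messaging.outgoing.cpe-reply.apicurio.registry.url=https://registry.example.com/api
# The stack trace points into the Apicurio registry client, which suggests the
# registry answered 404 to the schema lookup. If the artifact is not registered
# yet, a global-id strategy that creates it on first use may help (assumption,
# Apicurio 1.x strategy class):
mp.messaging.outgoing.cpe-reply.apicurio.registry.global-id=io.apicurio.registry.utils.serde.strategy.GetOrCreateIdStrategy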


How to resume training in spaCy transformers for NER

I have created a spaCy transformer model for named entity recognition. Last time I trained until it reached 90% accuracy, and I also have a model-best directory from which I can load the trained model for predictions. But now I have some more data samples and I wish to resume training this spaCy transformer. I saw that we can do it by changing config.cfg, but I am clueless about what to change.
This is my config.cfg after running python -m spacy init fill-config ./base_config.cfg ./config.cfg:
[paths]
train = null
dev = null
vectors = null
init_tok2vec = null
[system]
gpu_allocator = "pytorch"
seed = 0
[nlp]
lang = "en"
pipeline = ["transformer","ner"]
batch_size = 128
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"#tokenizers":"spacy.Tokenizer.v1"}
[components]
[components.ner]
factory = "ner"
incorrect_spans_key = null
moves = null
scorer = {"#scorers":"spacy.ner_scorer.v1"}
update_with_oracle_cut_size = 100
[components.ner.model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "ner"
extra_state_tokens = false
hidden_width = 64
maxout_pieces = 2
use_upper = false
nO = null
[components.ner.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
pooling = {"@layers":"reduce_mean.v1"}
upstream = "*"
[components.transformer]
factory = "transformer"
max_batch_items = 4096
set_extra_annotations = {"@annotation_setters":"spacy-transformers.null_annotation_setter.v1"}
[components.transformer.model]
@architectures = "spacy-transformers.TransformerModel.v3"
name = "roberta-base"
mixed_precision = false
[components.transformer.model.get_spans]
@span_getters = "spacy-transformers.strided_spans.v1"
window = 128
stride = 96
[components.transformer.model.grad_scaler_config]
[components.transformer.model.tokenizer_config]
use_fast = true
[components.transformer.model.transformer_config]
[corpora]
[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null
[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null
[training]
accumulate_gradient = 3
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.1
patience = 1600
max_epochs = 0
max_steps = 20000
eval_frequency = 200
frozen_components = []
annotating_components = []
before_to_disk = null
[training.batcher]
@batchers = "spacy.batch_by_padded.v1"
discard_oversize = true
size = 2000
buffer = 256
get_length = null
[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
progress_bar = false
[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.00005
[training.score_weights]
ents_f = 1.0
ents_p = 0.0
ents_r = 0.0
ents_per_type = null
[pretraining]
[initialize]
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
lookups = null
before_init = null
after_init = null
[initialize.components]
[initialize.tokenizer]
As you can see, there is a 'vectors' parameter under [initialize], so I tried giving it vectors from 'model-best' like this:
But it gave me this error:
OSError: [E884] The pipeline could not be initialized because the vectors could not be found at './model-best/ner'. If your pipeline was already initialized/trained before, call 'resume_training' instead of 'initialize', or initialize only the components that are new.
For those wondering whether I have given the wrong path: no, that directory exists, as you can see from the directory structure.
So, please guide me on how I can successfully resume training from the previous weights.
Thank you!
The vectors setting is not related to the transformer or what you're trying to do.
In the new config, you want to use the source option to load the components from the existing pipeline. You would modify the [components] blocks to contain only the source setting and no other settings:
[components.ner]
source = "/path/to/model-best"
[components.transformer]
source = "/path/to/model-best"
See: https://spacy.io/usage/training#config-components
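With the components sourced this way, you then run a normal training pass against the new config; a sketch, with placeholder corpus paths:

python -m spacy train ./config.cfg --output ./output --paths.train ./train.spacy --paths.dev ./dev.spacy --gpu-id 0

The new annotations go into the train/dev corpora; the sourced components provide the starting weights.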
The vectors setting refers to word vectors here. To reuse the vocabulary from the previously trained spaCy pipeline, you can use a config like the following:
[components.ner]
source = "/path/to/model-best"
[initialize]
vectors = ${paths.vectors}
[initialize.before_init]
@callbacks = "spacy.copy_from_base_model.v1"
tokenizer = "/path/to/model-best"
vocab = "/path/to/model-best"

Postfix - Rate limit for all domains

My client is a travel agency and has thousands of opted-in email addresses (no spam). Every week they send one message to all customers, but I'm having problems with some providers and want to rate-limit the entire server, for all outgoing domains.
I found some configurations to limit one message per destination per minute, and I know how to create extra configuration (using a transport file) to raise the limits for specific domains.
For some reason, Postfix is not rate-limiting anything :(
smtp_destination_rate_delay = 60s
default_destination_concurrency_limit = 1
smtp_destination_concurrency_limit = 1
smtp_destination_recipient_limit = 1
smtp_initial_destination_concurrency = 1
/etc/postfix/transport is empty
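For reference, the per-domain overrides I mean would look roughly like this (the transport name and delay values are placeholders, not my real config):

/etc/postfix/transport:
hotmail.com     slow:
outlook.com     slow:

/etc/postfix/master.cf:
slow      unix  -       -       n       -       1       smtp

/etc/postfix/main.cf:
slow_destination_rate_delay = 120s
slow_destination_concurrency_limit = 1

followed by postmap /etc/postfix/transport and postfix reload.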
For example, here is Hotmail receiving several messages within the same minute:
Jun 21 09:08:16 deres postfix/smtp[9905]: ABFB92165: to=<x@hotmail.com>, relay=hotmail-com.olc.protection.outlook.com[104.47.32.33]:25, delay=14, delays=11/0/0.83/1.4, dsn=2.6.0, status=sent (250 2.6.0 <600ef56d-18e7-4c10-9c7b-d0e5267c5589@SN1NAM01FT007.eop-nam01.prod.protection.outlook.com> [InternalId=9380208589439, Hostname=SN1NAM01HT004.eop-nam01.prod.protection.outlook.com] 442267 bytes in 1.070, 403.463 KB/sec Queued mail for delivery)
Jun 21 09:08:17 deres postfix/smtp[9849]: 59A7E2296: to=<y@hotmail.com>, relay=hotmail-com.olc.protection.outlook.com[104.47.32.33]:25, delay=14, delays=12/0/0.91/1.3, dsn=2.6.0, status=sent (250 2.6.0 <b6edd8d6-1302-45e9-8eef-a80b1d2659cd@SN1NAM01FT054.eop-nam01.prod.protection.outlook.com> [InternalId=9882719764030, Hostname=SN1NAM01HT090.eop-nam01.prod.protection.outlook.com] 442313 bytes in 0.689, 626.054 KB/sec Queued mail for delivery)
Jun 21 09:08:18 deres postfix/smtp[9836]: 0D0C122E7: to=<z@hotmail.com>, relay=hotmail-com.olc.protection.outlook.com[104.47.32.33]:25, delay=14, delays=12/0/0.76/1.5, dsn=2.6.0, status=sent (250 2.6.0 <8bd2a3d4-d20b-44f0-a90e-fc060b83380b@SN1NAM01FT014.eop-nam01.prod.protection.outlook.com> [InternalId=9354438785755, Hostname=SN1NAM01HT074.eop-nam01.prod.protection.outlook.com] 442283 bytes in 1.031, 418.834 KB/sec Queued mail for delivery)
The only unusual thing (which I don't believe matters) is that the customer submits the messages on an alternate port (5544 instead of 25/465/587).
Any idea? Thank you.
Edit: as requested, my config.
alias_database = hash:/etc/aliases
alias_maps = hash:/etc/aliases
bounce_queue_lifetime = 1h
broken_sasl_auth_clients = yes
command_directory = /usr/sbin
config_directory = /etc/postfix
daemon_directory = /usr/libexec/postfix
debug_peer_level = 2
debugger_command = PATH=/bin:/usr/bin:/usr/local/bin:/usr/X11R6/bin xxgdb $daemon_directory/$process_name $process_id & sleep 5
default_destination_concurrency_limit = 1
delay_warning_time = 4
disable_vrfy_command = yes
dovecot_destination_recipient_limit = 1
html_directory = no
inet_interfaces = all
inet_protocols = all
mail_owner = postfix
mailq_path = /usr/bin/mailq.postfix
manpage_directory = /usr/share/man
maximal_backoff_time = 40m
maximal_queue_lifetime = 24h
message_size_limit = 26214400
minimal_backoff_time = 15m
mydestination = localhost.$mydomain, localhost
mydomain = deres.domain.com.br
myhostname = deres.domain.com.br
mynetworks = all
newaliases_path = /usr/bin/newaliases.postfix
queue_directory = /var/spool/postfix
queue_run_delay = 15m
readme_directory = /usr/share/doc/postfix-2.2.2/README_FILES
recipient_delimiter = +
relay_domains = proxy:mysql:/etc/sentora/configs/postfix/mysql-relay_domains_maps.cf
sample_directory = /usr/share/doc/postfix-2.2.2/samples
sendmail_path = /usr/sbin/sendmail.postfix
setgid_group = postdrop
smtp_defer_if_no_mx_address_found = no
smtp_destination_concurrency_limit = 1
smtp_destination_rate_delay = 55s
smtp_destination_recipient_limit = 1
smtp_initial_destination_concurrency = 1
smtp_tls_note_starttls_offer = yes
smtp_tls_session_cache_database = btree:$data_directory/smtp_tls_session_cache
smtp_use_tls = yes
smtpd_client_restrictions =
smtpd_data_restrictions = reject_unauth_pipelining,permit_sasl_authenticated
smtpd_discard_ehlo_keywords = silent-discard,dsn
smtpd_helo_required = yes
smtpd_helo_restrictions =
smtpd_recipient_restrictions = permit_sasl_authenticated, permit_mynetworks, reject_unauth_destination, reject_non_fqdn_sender, reject_non_fqdn_recipient, reject_unknown_recipient_domain, reject_rbl_client zen.spamhaus.org, reject_rbl_client bl.spamcop.net, reject_rbl_client b.barracudacentral.org
smtpd_sasl_auth_enable = yes
smtpd_sasl_local_domain = $myhostname
smtpd_sasl_path = private/auth
smtpd_sasl_security_options = noanonymous
smtpd_sasl_type = dovecot
smtpd_sender_restrictions =
smtpd_tls_CAfile = /etc/letsencrypt/live/ca.pem
smtpd_tls_auth_only = no
smtpd_tls_cert_file = /etc/letsencrypt/live/...removed
smtpd_tls_key_file = /etc/letsencrypt/live/...removed
smtpd_tls_loglevel = 1
smtpd_tls_received_header = yes
smtpd_tls_security_level = may
smtpd_tls_session_cache_timeout = 3600s
smtpd_use_tls = yes
soft_bounce = no
tls_random_source = dev:/dev/urandom
transport_maps = hash:/etc/postfix/transport
unknown_address_reject_code = 554
unknown_hostname_reject_code = 554
unknown_local_recipient_reject_code = 550
virtual_alias_maps = proxy:mysql:/etc/sentora/configs/postfix/mysql-virtual_alias_maps.cf, regexp:/etc/sentora/configs/postfix/virtual_regexp
virtual_gid_maps = static:12
virtual_mailbox_base = /var/sentora/vmail
virtual_mailbox_domains = proxy:mysql:/etc/sentora/configs/postfix/mysql-virtual_domains_maps.cf
virtual_mailbox_maps = proxy:mysql:/etc/sentora/configs/postfix/mysql-virtual_mailbox_maps.cf
virtual_minimum_uid = 101
virtual_transport = dovecot
virtual_uid_maps = static:101
Solved.
Fixed with:
default_destination_recipient_limit = 2
smtp_destination_recipient_limit = 2
(If I read the postconf(5) docs correctly, with a destination recipient limit of 1 Postfix treats each individual recipient address as its own "destination", so the per-destination rate delay never groups mail by domain; raising the limit to 2 makes the destination the domain, and the delay kicks in.)
Sorry, I misread your question. You WANT rate limiting but don't seem to be getting it. Correct?
The settings you showed should work. Have you done a postfix reload? Postfix won't pick up changes in its config file without a reload or restart.
If that doesn't do it, run postconf -n, which will show the settings Postfix thinks you have in your config file, and post the output here.
If what it thinks you set and what you actually set are different, you'll need to do some detective work to see where it's getting its settings from.
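For example (the grep is just to narrow the output to the relevant settings):

postfix reload
postconf -n | grep -E 'destination|rate'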

Error when connecting Cygnus and STH

I am trying to connect Cygnus and STH. I have a Cygnus instance in a Docker container and an STH instance in a VM.
When I send a notification to Cygnus using the script available at fiware-cygnus/cygnus-ngsi/resources/ngsi-examples/notification-json-simple.sh, it shows me the following error:
NOTE: I replaced the STH IP to post the question
time=2017-03-06T17:35:46.522Z | lvl=ERROR | corr=0767a9d2-d44e-4872-9ef4-b57d68ef7f88 | trans=0767a9d2-d44e-4872-9ef4-b57d68ef7f88 | srv=red | subsrv=/red/red | comp=cygnus-ngsi | op=processRollbackedBatches | msg=com.telefonica.iot.cygnus.sinks.NGSISink[394] : Persistence error. Message: -, Timed out after 30000 ms while waiting for a server that matches PrimaryServerSelector. Client view of cluster state is {type=UNKNOWN, servers=[{address=:27017, type=UNKNOWN, state=CONNECTING, exception={com.mongodb.MongoSocketReadException: Exception receiving message}, caused by {java.net.SocketException: Connection reset}}], Stack trace: [com.telefonica.iot.cygnus.sinks.NGSISTHSink.persistOne(NGSISTHSink.java:158), com.telefonica.iot.cygnus.sinks.NGSISTHSink.persistBatch(NGSISTHSink.java:93), com.telefonica.iot.cygnus.sinks.NGSISink.processRollbackedBatches(NGSISink.java:387), com.telefonica.iot.cygnus.sinks.NGSISink.process(NGSISink.java:370), org.apache.flume.sink.DefaultSinkProcessor.process(DefaultSinkProcessor.java:68), org.apache.flume.SinkRunner$PollingRunner.run(SinkRunner.java:147), java.lang.Thread.run(Thread.java:745)]
time=2017-03-06T17:35:46.522Z | lvl=INFO | corr=0767a9d2-d44e-4872-9ef4-b57d68ef7f88 | trans=0767a9d2-d44e-4872-9ef4-b57d68ef7f88 | srv=red | subsrv=/red/red | comp=cygnus-ngsi | op=doRollbackAgain | msg=com.telefonica.iot.cygnus.sinks.NGSISink[458] : Rollbacking again (0767a9d2-d44e-4872-9ef4-b57d68ef7f88), this was retry #9
And nothing is posted in STH.
In my agent.conf (cygnus conf) I have this:
cygnus-ngsi.sources = http-source
cygnus-ngsi.sinks = sth-sink
cygnus-ngsi.channels = sth-channel
cygnus-ngsi.sources.http-source.type = org.apache.flume.source.http.HTTPSource
cygnus-ngsi.sources.http-source.channels = sth-channel
cygnus-ngsi.sources.http-source.port = 5050
cygnus-ngsi.sources.http-source.handler = com.telefonica.iot.cygnus.handlers.NGSIRestHandler
cygnus-ngsi.sources.http-source.handler.notification_target = /notify
cygnus-ngsi.sources.http-source.handler.default_service = default
cygnus-ngsi.sources.http-source.handler.default_service_path = /
cygnus-ngsi.sources.http-source.interceptors = ts gi
cygnus-ngsi.sources.http-source.interceptors.ts.type = timestamp
cygnus-ngsi.sources.http-source.interceptors.gi.type = com.telefonica.iot.cygnus.interceptors.NGSIGroupingInterceptor$Builder
cygnus-ngsi.sources.http-source.interceptors.gi.grouping_rules_conf_file = /opt/apache-flume/conf/grouping_rules.conf
cygnus-ngsi.sinks.sth-sink.type = com.telefonica.iot.cygnus.sinks.NGSISTHSink
cygnus-ngsi.sinks.sth-sink.channel = sth-channel
#cygnus-ngsi.sinks.sth-sink.enable_encoding = false
#cygnus-ngsi.sinks.sth-sink.enable_grouping = false
#cygnus-ngsi.sinks.sth-sink.enable_name_mappings = false
#cygnus-ngsi.sinks.sth-sink.enable_lowercase = false
cygnus-ngsi.sinks.sth-sink.data_model = dm-by-entity
cygnus-ngsi.sinks.sth-sink.mongo_hosts = <STH-MONGO-IP>:27017
cygnus-ngsi.sinks.sth-sink.mongo_username =
cygnus-ngsi.sinks.sth-sink.mongo_password =
cygnus-ngsi.sinks.sth-sink.db_prefix = sth_
cygnus-ngsi.sinks.sth-sink.collection_prefix = sth_
cygnus-ngsi.sinks.sth-sink.resolutions = day,hour,minute
#cygnus-ngsi.sinks.sth-sink.batch_size = 1
#cygnus-ngsi.sinks.sth-sink.batch_timeout = 30
#cygnus-ngsi.sinks.sth-sink.batch_ttl = 10
#cygnus-ngsi.sinks.sth-sink.data_expiration = 0
#cygnus-ngsi.sinks.sth-sink.ignore_white_spaces = true
cygnus-ngsi.channels.sth-channel.type = com.telefonica.iot.cygnus.channels.CygnusMemoryChannel
cygnus-ngsi.channels.sth-channel.capacity = 1000
cygnus-ngsi.channels.sth-channel.transactionCapacity = 100
And my STH config is:
var config = {};
// STH server configuration
//--------------------------
config.server = {
host: '10.0.2.15',
port: '8666',
defaultService: 'testservice',
defaultServicePath: '/testservicepath',
filterOutEmpty: 'true',
aggregationBy: ['day', 'hour', 'minute'],
temporalDir: 'temp'
};
// Database configuration
//------------------------
config.database = {
dataModel: 'collection-per-entity',
user: '',
password: '',
URI: 'localhost:27017',
replicaSet: '',
prefix: 'sth_',
collectionPrefix: 'sth_',
poolSize: '5',
shouldStore: 'both',
truncation: {
expireAfterSeconds: '0',
size: '0',
max: '0'
},
ignoreBlankSpaces: 'true',
nameMapping: {
enabled: 'false',
configFile: './name-mapping.json'
},
nameEncoding: 'false'
};
// Logging configuration
//------------------------
config.logging = {
level: 'info',
// NODE_ENV variable is set to 'development'.
format: 'pipe',
proofOfLifeInterval: '60'
};
module.exports = config;
I found the solution to this problem: I just edited my /etc/mongod.conf and commented out the bind_ip=127.0.0.1 line so that MongoDB listens on all interfaces.
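For the record, the change was just this in the legacy INI-style mongod.conf, followed by a restart (service mongod restart, or systemctl restart mongod depending on the init system):

# bind_ip = 127.0.0.1

Bear in mind this exposes MongoDB to the whole network, so access should be restricted with a firewall.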

Remote enable xdebug in PHPStorm for console scripts

How can I use xdebug on console scripts with PHPStorm 9 without having to enable xdebug.remote_autostart?
I've found that if I have xdebug.remote_autostart enabled, it hangs certain console scripts trying to do a DNS lookup.
I'm connecting to a vagrant box.
Here is my current Xdebug config:
zend_extension = xdebug.so
xdebug.var_display_max_data = -1
xdebug.var_display_max_depth = 10
xdebug.default_enable = 0
xdebug.remote_enable = 1
xdebug.remote_connect_back = 1
xdebug.remote_port = 9001
xdebug.remote_host = 192.168.10.1
xdebug.remote_autostart = 0
xdebug.remote_handler = dbgp
xdebug.idekey = PHPSTORM
xdebug.collect_return = 1
xdebug.collect_params = 10
xdebug.show_mem_delta = 1
xdebug.profiler_enable_trigger = 1
xdebug.profiler_output_dir = "/home/vagrant/Projects/xdebug_profiler"
xdebug.trace_enable_trigger = 1
xdebug.trace_output_dir = "/home/vagrant/Projects/xdebug_traces"
xdebug.profiler_enable = 0
xdebug.trace_enable = 1
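A sketch of the kind of per-invocation triggering I am after, assuming Xdebug 2 honours the XDEBUG_CONFIG environment variable on the CLI, and with 'vagrant' as a placeholder for the server name configured in PhpStorm under Settings > PHP > Servers:

export XDEBUG_CONFIG="idekey=PHPSTORM remote_host=192.168.10.1 remote_port=9001"
export PHP_IDE_CONFIG="serverName=vagrant"
php my-console-script.php

With remote_autostart left at 0, a debug session should then start only in shells where these variables are exported.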

doxygen creates empty data for Tcl code file

I am using the standard doxygen version shipped with RH5.5.
These are the contents of the only Tcl file (sample.tcl):
##general procs used for CCK coding
#use set varname rather than return $varname
#\code
##Documented proc DlgLgrep \c
#accepts a list and an expression. Returns a list of all elements of the list which match the expression.
#The expression is evaluated in the DlgLgrep context and local namespace. You can, of course, use
#upvar and uplevel within your code.
#For example, let us say that you want numbers that are above 3:
#puts [ DlgLgrep { $grepy > 3 } { 1 4 0 8 -2 } ]
#will yield
#4 8
proc DlgLgrep { expry listy } {
    #accepts a list and an expression. Returns a list of all elements of the list which match the expression.
    #The expression is evaluated in the DlgLgrep context and local namespace. You can, of course, use
    #upvar and uplevel within your code.
    #For example, let us say that you want numbers that are above 3:
    #puts [ DlgLgrep { $grepy > 3 } { 1 4 0 8 -2 } ]
    #will yield
    #4 8
    set ret {}
    foreach grepy $listy {
        if { [ expr $expry ] } {
            lappend ret $grepy
        }
    }
    return $ret
}
#\endcode
These are the doxygen file contents:
PROJECT_NAME = CCK_DLG
PROJECT_NUMBER =
OUTPUT_DIRECTORY = ./
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
USE_WINDOWS_ENCODING = NO
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
ABBREVIATE_BRIEF =
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
FULL_PATH_NAMES = YES
STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
DETAILS_AT_TOP = NO
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 8
ALIASES =
OPTIMIZE_OUTPUT_FOR_C = NO
OPTIMIZE_OUTPUT_JAVA = NO
BUILTIN_STL_SUPPORT = NO
DISTRIBUTE_GROUP_DOC = NO
SUBGROUPING = YES
EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
EXTRACT_STATIC = NO
EXTRACT_LOCAL_CLASSES = YES
EXTRACT_LOCAL_METHODS = NO
HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
HIDE_FRIEND_COMPOUNDS = NO
HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
CASE_SENSE_NAMES = YES
HIDE_SCOPE_NAMES = NO
SHOW_INCLUDE_FILES = YES
INLINE_INFO = YES
SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = NO
SORT_BY_SCOPE_NAME = NO
GENERATE_TODOLIST = YES
GENERATE_TESTLIST = YES
GENERATE_BUGLIST = YES
GENERATE_DEPRECATEDLIST= YES
ENABLED_SECTIONS =
MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
SHOW_DIRECTORIES = NO
FILE_VERSION_FILTER =
QUIET = NO
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
WARN_IF_DOC_ERROR = YES
WARN_NO_PARAMDOC = NO
WARN_FORMAT = "$file:$line: $text"
WARN_LOGFILE =
INPUT = ./
FILE_PATTERNS =
RECURSIVE = NO
EXCLUDE = *.tcl
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS =
EXAMPLE_PATH =
EXAMPLE_PATTERNS =
EXAMPLE_RECURSIVE = NO
IMAGE_PATH =
INPUT_FILTER =
FILTER_PATTERNS =
FILTER_SOURCE_FILES = NO
SOURCE_BROWSER = NO
INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
REFERENCED_BY_RELATION = YES
REFERENCES_RELATION = YES
REFERENCES_LINK_SOURCE = YES
USE_HTAGS = NO
VERBATIM_HEADERS = YES
ALPHABETICAL_INDEX = NO
COLS_IN_ALPHA_INDEX = 5
IGNORE_PREFIX =
GENERATE_HTML = YES
HTML_OUTPUT = html
HTML_FILE_EXTENSION = .html
HTML_HEADER =
HTML_FOOTER =
HTML_STYLESHEET =
HTML_ALIGN_MEMBERS = YES
GENERATE_HTMLHELP = NO
CHM_FILE =
HHC_LOCATION =
GENERATE_CHI = NO
BINARY_TOC = NO
TOC_EXPAND = NO
DISABLE_INDEX = NO
ENUM_VALUES_PER_LINE = 4
GENERATE_TREEVIEW = NO
TREEVIEW_WIDTH = 250
GENERATE_LATEX = YES
LATEX_OUTPUT = latex
LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
COMPACT_LATEX = NO
PAPER_TYPE = a4wide
EXTRA_PACKAGES =
LATEX_HEADER =
PDF_HYPERLINKS = NO
USE_PDFLATEX = NO
LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
GENERATE_RTF = NO
RTF_OUTPUT = rtf
COMPACT_RTF = NO
RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
GENERATE_MAN = NO
MAN_OUTPUT = man
MAN_EXTENSION = .3
MAN_LINKS = NO
GENERATE_XML = NO
XML_OUTPUT = xml
XML_SCHEMA =
XML_DTD =
XML_PROGRAMLISTING = YES
GENERATE_AUTOGEN_DEF = NO
GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
PERLMOD_PRETTY = YES
PERLMOD_MAKEVAR_PREFIX =
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = NO
EXPAND_ONLY_PREDEF = NO
SEARCH_INCLUDES = YES
INCLUDE_PATH =
INCLUDE_FILE_PATTERNS =
PREDEFINED =
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
TAGFILES =
GENERATE_TAGFILE =
ALLEXTERNALS = NO
EXTERNAL_GROUPS = YES
PERL_PATH = /usr/bin/perl
CLASS_DIAGRAMS = YES
HIDE_UNDOC_RELATIONS = YES
HAVE_DOT = NO
CLASS_GRAPH = YES
COLLABORATION_GRAPH = YES
GROUP_GRAPHS = YES
UML_LOOK = NO
TEMPLATE_RELATIONS = NO
INCLUDE_GRAPH = YES
INCLUDED_BY_GRAPH = YES
CALL_GRAPH = NO
CALLER_GRAPH = NO
GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
DOT_IMAGE_FORMAT = png
DOT_PATH =
DOTFILE_DIRS =
MAX_DOT_GRAPH_WIDTH = 1024
MAX_DOT_GRAPH_HEIGHT = 1024
MAX_DOT_GRAPH_DEPTH = 0
DOT_TRANSPARENT = NO
DOT_MULTI_TARGETS = NO
GENERATE_LEGEND = YES
DOT_CLEANUP = YES
SEARCHENGINE = NO
This is a small excerpt, of course. I am a newbie, and no one where I work has done any doxygen work.
Thanks!
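Two things stand out in the config above (observations, not a verified fix): EXCLUDE = *.tcl excludes the very file that should be documented, and EXCLUDE takes file or directory names rather than wildcards (wildcards belong in EXCLUDE_PATTERNS). Also, as far as I know doxygen only gained built-in Tcl support in release 1.7.5, which is far newer than anything RH5.5 ships, so the parser may be skipping .tcl files entirely regardless. The relevant Doxyfile lines would then be:

INPUT            = ./
FILE_PATTERNS    = *.tcl
EXCLUDE          =
EXCLUDE_PATTERNS =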