Hope everyone else is doing well. We have been stable on eNcore 4.8.2 for a while now, but ONLY with the following changes to estreamer.conf:

- response timeout ("responseTimeout": 30)
- always attempt to continue ("alwaysAttemptContinue": true)
- extended requests ("extended": true)
- batch size ("batchSize": 2)
- number of workers ("workerProcesses": 1)

These are changes we made as directed by Cisco. For example, we lowered workerProcesses not because we didn't have enough CPUs, but because Cisco told us that with our low ingest rate, more worker processes would cause issues.

# estreamer.conf
{
    "connectTimeout": 10,
    "alwaysAttemptContinue": true,
    "batchSize": 2,
    "enabled": true,
    "handler": {
        "output@comment": "If you disable all outputters it behaves as a sink",
        "outputters": [
            {
                "adapter": "splunk",
                "enabled": true,
                "stream": {
                    "options": {
                        "maxLogs": 10000,
                        "rotate": true
                    },
                    "uri": "relfile:///data/splunk/encore.{0}.log"
                }
            }
        ],
        "records": {
            "connections": false,
            "core": true,
            "excl@comment": [
                "These records will be excluded regardless of above (overrides 'include')",
                "e.g. to exclude flow and IPS events use [ 71, 400 ]"
            ],
            "exclude": [],
            "inc@comment": "These records will be included regardless of above",
            "include": [],
            "intrusion": true,
            "metadata": true,
            "packets": true,
            "rna": true,
            "rua": true
        }
    },
    "logging": {
        "filepath": "estreamer.log",
        "format": "%(asctime)s %(name)-12s %(levelname)-8s %(message)s",
        "lev@comment": "Levels include FATAL, ERROR, WARNING, INFO, DEBUG, VERBOSE and TRACE",
        "level": "INFO",
        "stdOut": true
    },
    "monitor": {
        "bookmark": false,
        "handled": true,
        "period": 120,
        "subscribed": true,
        "velocity": false
    },
    "responseTimeout": 30,
    "star@comment": "0 for genesis, 1 for now, 2 for bookmark",
    "start": 2,
    "subscription": {
        "records": {
            "@comment": [
                "Just because we subscribe doesn't mean the server is sending. Nor does it mean",
                "we are writing the records either. See handler.records[]"
            ],
            "archiveTimestamps": true,
            "eventExtraData": true,
            "extended": true,
            "impactEventAlerts": true,
            "intrusion": true,
            "metadata": true,
            "packetData": true
        },
        "servers": [
            {
                <obviously omitted>
            }
        ]
    },
    "workerProcesses": 1
}
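
For anyone making the same edits: a typo in the JSON will stop the client from starting, so it's worth checking the file parses before restarting. Below is a minimal sketch (standard-library Python only; the filename "estreamer.conf" in the working directory is an assumption about your layout) that loads the config and echoes the tuning keys discussed above:

#!/usr/bin/env python
# Sanity-check estreamer.conf after editing: confirm it parses as JSON
# and print the tuning values discussed in this thread. Illustrative
# only; adjust the path to wherever your estreamer.conf actually lives.
import json

with open('estreamer.conf') as f:
    conf = json.load(f)  # raises an error on malformed JSON

for key in ('responseTimeout', 'alwaysAttemptContinue',
            'batchSize', 'workerProcesses'):
    print('{0}: {1}'.format(key, conf.get(key)))

# 'extended' lives under subscription.records, not at the top level
print('extended: {0}'.format(conf['subscription']['records'].get('extended')))

If that prints the values you expect (30, true, 2, 1, true in our case), the edit at least round-trips as valid JSON and you can restart the client.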