Here's what my job is set up like:
# NOTE(review): this comment previously said "last minute", but
# earliest_time is "-1h" — the job actually searches the last hour.
# count=0 removes the result cap on the search job itself.
kwargs = {
"exec_mode": "blocking",
"earliest_time": "-1h",
"latest_time": "now",
"count": 0
}
# Create a job for the query. exec_mode "blocking" means jobs.create()
# does not return until the search has finished on the server.
logging.info("Creating jobs...")
atlas = service.jobs.create(atlas_query, **kwargs)
# count=0 on the results endpoint asks for ALL results in one response
# (otherwise the default page size would truncate them).
result_kwargs = { "count": 0 }
# Parse results. ResultsReader streams and parses the (XML) results
# body, yielding one dict-like object per event — presumably the XML
# parsing here, not the job, is where the minutes go; verify by timing
# the read loop separately from job completion.
atlas_reader = results.ResultsReader(atlas.results(**result_kwargs))
However, I'm having problems with the data from the atlas_reader object. Looping through it and saving certain fields to a CSV file takes several minutes, even though there are only a few thousand events. The job itself finishes in about 5 seconds. I have no idea what is wrong.
This is basically what I do, and I've never had a speed problem. It dumps the results into a list of JSON-serializable dicts, which I then send to a web page for display.
service = client.connect(
host=HOST,
port=PORT,
username=USERNAME,
password=PASSWORD
)
search_query = "search * | head 10"
kwargs_normalsearch = {"exec_mode": "normal", "earliest_time": "-5m@m",
"latest_time": "-1m@m"}
job = service.jobs.create(search_query, **kwargs_normalsearch)
while True:
job.refresh()
if job["isDone"] == "1":
print job["sid"]
print job["runDuration"]
break
resultCount = job["resultCount"]
offset = 0
count = 100
data = []
if int(resultCount) > 0:
while offset < int(resultCount):
kwargs_paginate = {"count": count, "offset": offset}
page = job.results(**kwargs_paginate)
for result in results.ResultsReader(page):
d = {}
for r in result:
# print r , result[r]
d[r] = result[r]
data.append(d)
offset += count
for result in data:
print result