<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Splunk Python SDK very Slow in Splunk Dev</title>
    <link>https://community.splunk.com/t5/Splunk-Dev/Splunk-Python-SDK-very-Slow/m-p/195148#M2627</link>
    <description>&lt;P&gt;Here's what my job is set up like:&lt;/P&gt;

&lt;PRE&gt;&lt;CODE&gt;# Set count to 0 to return all events in the last hour
kwargs = {
        "exec_mode": "blocking",
        "earliest_time": "-1h",
        "latest_time": "now",
        "count": 0
        }

# Create jobs for both queries
logging.info("Creating jobs...")
atlas = service.jobs.create(atlas_query, **kwargs)

# Set unlimited results to return
result_kwargs = { "count": 0 }

# Parse results
atlas_reader = results.ResultsReader(atlas.results(**result_kwargs))
&lt;/CODE&gt;&lt;/PRE&gt;

&lt;P&gt;However, I'm having problems with the data from the atlas_reader object. It takes several minutes to loop through it and save certain fields to a CSV file, even though there are only a few thousand events. The job itself finishes in about 5 seconds. I have no idea what is wrong.&lt;/P&gt;</description>
    <pubDate>Tue, 07 Jul 2015 20:10:39 GMT</pubDate>
    <dc:creator>jananth1</dc:creator>
    <dc:date>2015-07-07T20:10:39Z</dc:date>
    <item>
      <title>Splunk Python SDK very Slow</title>
      <link>https://community.splunk.com/t5/Splunk-Dev/Splunk-Python-SDK-very-Slow/m-p/195148#M2627</link>
      <description>&lt;P&gt;Here's what my job is set up like:&lt;/P&gt;

&lt;PRE&gt;&lt;CODE&gt;# Set count to 0 to return all events in the last hour
kwargs = {
        "exec_mode": "blocking",
        "earliest_time": "-1h",
        "latest_time": "now",
        "count": 0
        }

# Create jobs for both queries
logging.info("Creating jobs...")
atlas = service.jobs.create(atlas_query, **kwargs)

# Set unlimited results to return
result_kwargs = { "count": 0 }

# Parse results
atlas_reader = results.ResultsReader(atlas.results(**result_kwargs))
&lt;/CODE&gt;&lt;/PRE&gt;

      <description>&lt;P&gt;However, I'm having problems with the data from the atlas_reader object. It takes several minutes to loop through it and save certain fields to a CSV file, even though there are only a few thousand events. The job itself finishes in about 5 seconds. I have no idea what is wrong.&lt;/P&gt;</description>
      <pubDate>Tue, 07 Jul 2015 20:10:39 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Splunk-Dev/Splunk-Python-SDK-very-Slow/m-p/195148#M2627</guid>
      <dc:creator>jananth1</dc:creator>
      <dc:date>2015-07-07T20:10:39Z</dc:date>
    </item>
    <item>
      <title>Re: Splunk Python SDK very Slow</title>
      <link>https://community.splunk.com/t5/Splunk-Dev/Splunk-Python-SDK-very-Slow/m-p/195149#M2628</link>
      <description>&lt;P&gt;This is basically what I do, never had a speed problem.  Dumps the results into a list of json objects.  I send  this data to a web page for display.&lt;/P&gt;

&lt;PRE&gt;&lt;CODE&gt;service = client.connect(
    host=HOST,
    port=PORT,
    username=USERNAME,
    password=PASSWORD
)

search_query = "search * | head 10"
kwargs_normalsearch = {"exec_mode": "normal", "earliest_time": "-5m@m",
                         "latest_time": "-1m@m"}

job = service.jobs.create(search_query, **kwargs_normalsearch)

while True:
    job.refresh()
    if job["isDone"] == "1":
        print job["sid"]
        print job["runDuration"]
        break

resultCount = job["resultCount"]
offset = 0
count = 100
data = []

if int(resultCount) &amp;gt; 0:
    while offset &amp;lt; int(resultCount):
        kwargs_paginate = {"count": count, "offset": offset}
        page = job.results(**kwargs_paginate)

        for result in results.ResultsReader(page):
            d = {}
            for r in result:
                # print r , result[r]
                d[r] = result[r]
            data.append(d)
        offset += count

for result in data:
    print result
&lt;/CODE&gt;&lt;/PRE&gt;</description>
      <pubDate>Fri, 16 Sep 2016 17:54:06 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Splunk-Dev/Splunk-Python-SDK-very-Slow/m-p/195149#M2628</guid>
      <dc:creator>thomrs</dc:creator>
      <dc:date>2016-09-16T17:54:06Z</dc:date>
    </item>
  </channel>
</rss>

