<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Python SDK dbxquery results limited to 100k rows using jobs.export - Do I need to paginate streaming results? in Splunk Dev</title>
    <link>https://community.splunk.com/t5/Splunk-Dev/Python-SDK-dbxquery-results-limited-to-100k-rows-using-jobs/m-p/608751#M10731</link>
    <description>&lt;P&gt;When I run a dbxquery through jobs.export, my results are limited to 100k rows. Do I need to paginate streaming results?&lt;/P&gt;
&lt;P&gt;Here's my code:&lt;/P&gt;
&lt;LI-CODE lang="python"&gt;data = {
        'adhoc_search_level': 'fast',
        'search_mode': 'normal',
        'preview': False,
        'max_count': 500000,
        'output_mode': 'json',
        'auto_cancel': 300,
        'count': 0
    }

job = service.jobs.export(&amp;lt;dbxquery&amp;gt;, **data)
reader = results.JSONResultsReader(job)
lst = [result for result in reader if isinstance(result, dict)]&lt;/LI-CODE&gt;
&lt;P&gt;This runs correctly, except that the results always stop at 100k rows when there should be over 200k.&lt;/P&gt;</description>
    <pubDate>Tue, 09 Aug 2022 14:56:25 GMT</pubDate>
    <dc:creator>joecav</dc:creator>
    <dc:date>2022-08-09T14:56:25Z</dc:date>
    <item>
      <title>Python SDK dbxquery results limited to 100k rows using jobs.export - Do I need to paginate streaming results?</title>
      <link>https://community.splunk.com/t5/Splunk-Dev/Python-SDK-dbxquery-results-limited-to-100k-rows-using-jobs/m-p/608751#M10731</link>
      <description>&lt;P&gt;When I run a dbxquery through jobs.export, my results are limited to 100k rows. Do I need to paginate streaming results?&lt;/P&gt;
&lt;P&gt;Here's my code:&lt;/P&gt;
&lt;LI-CODE lang="python"&gt;data = {
        'adhoc_search_level': 'fast',
        'search_mode': 'normal',
        'preview': False,
        'max_count': 500000,
        'output_mode': 'json',
        'auto_cancel': 300,
        'count': 0
    }

job = service.jobs.export(&amp;lt;dbxquery&amp;gt;, **data)
reader = results.JSONResultsReader(job)
lst = [result for result in reader if isinstance(result, dict)]&lt;/LI-CODE&gt;
&lt;P&gt;This runs correctly, except that the results always stop at 100k rows when there should be over 200k.&lt;/P&gt;
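&lt;P&gt;For reference, this is the kind of pagination I had in mind if streaming export really does need it: run the dbxquery as a normal (non-export) job, wait for it to finish, then page through the results endpoint with count and offset. It is only a rough, untested sketch, and the page size and job parameters are my own guesses.&lt;/P&gt;
&lt;LI-CODE lang="python"&gt;import time

from splunklib import results

# Rough, untested sketch of a paginated alternative. 'service' is the same
# authenticated splunklib.client.Service as above; the 50k page size is a guess.
job = service.jobs.create(&amp;lt;dbxquery&amp;gt;, adhoc_search_level='fast', max_count=500000)
while not job.is_done():
    time.sleep(2)

page_size = 50000
offset = 0
rows = []
while True:
    # Pull one page of finished results at a time
    stream = job.results(output_mode='json', count=page_size, offset=offset)
    page = [r for r in results.JSONResultsReader(stream) if isinstance(r, dict)]
    if not page:
        break
    rows.extend(page)
    offset += page_size&lt;/LI-CODE&gt;</description>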
      <pubDate>Tue, 09 Aug 2022 14:56:25 GMT</pubDate>
      <guid>https://community.splunk.com/t5/Splunk-Dev/Python-SDK-dbxquery-results-limited-to-100k-rows-using-jobs/m-p/608751#M10731</guid>
      <dc:creator>joecav</dc:creator>
      <dc:date>2022-08-09T14:56:25Z</dc:date>
    </item>
  </channel>
</rss>

