Here is the full script (the preview above was truncated by the "See more..." fold; the complete code follows):
import requests

# Splunkbase REST API endpoint for listing apps; "<uid>/release/" is
# appended per app to fetch its releases.
BASE_URL = "https://splunkbase.splunk.com/api/v1/app/"
# CSV-style output file: header row written once, one row per app appended.
OUTPUT_PATH = r"C:\Users\denis.zarfin\PycharmProjects\pythonProject2\main.txt"
# The original loop checked `offset > 2700` *after* processing, so the last
# offset actually handled was 2701 — keep that bound for identical coverage.
LAST_OFFSET = 2701

# Write the header row once; detail rows are appended inside the loop.
with open(OUTPUT_PATH, "w") as f:
    f.write("name" + ", " + "uid" + ", " + "title" + ", " + '\n')

for offset in range(LAST_OFFSET + 1):
    try:
        # One app per request (limit=1), paging via the offset parameter.
        response = requests.get(BASE_URL, params={"limit": 1, "offset": offset})
        data = response.json()
    except (requests.RequestException, ValueError):
        # Skip this offset on network/JSON errors instead of hiding *every*
        # exception behind a bare `except: pass` (which also risked a
        # NameError in the summary print when a/b/c were never bound).
        continue
    for app in data.get("results", []):
        # Build the per-app release URL fresh each time; the original
        # mutated a shared `url2` string and had to reset it afterwards.
        release_url = f"{BASE_URL}{app['uid']}/release/"
        try:
            releases = requests.get(release_url).json()
        except (requests.RequestException, ValueError):
            continue
        # Only the first (most recent) release is recorded, as before.
        for release in releases[:1]:
            name = str(release["name"])
            uid = str(app["uid"])
            title = str(app["title"])
            with open(OUTPUT_PATH, "a") as f:
                f.write(name + ", " + uid + ", " + title + ", " + '\n')
            print(offset, name, uid, title)
print("ok")

That one exports the results to CSV, but it is not very good. In the end, I want to get two JSON objects. I was able to do it "manually" with:

import json
import requests

# Fetch apps one at a time from the Splunkbase API and collect the raw
# JSON payloads; currently limited to a single app (offset 0 only).
result = []
for offset in range(0, 1):
    url = f'https://splunkbase.splunk.com/api/v1/app/?offset={offset}&limit=1'
    data = requests.get(url).json()
    # Keep the payload — the original built `result` but never filled it.
    result.append(data)
    # Fix 1: `data["results"]` is a *list* of app dicts, so index it with
    # [0] before reading the 'uid' key.
    # Fix 2: the original nested single quotes inside a single-quoted
    # f-string (`f'...{data["results"]['uid']}'`), which is a SyntaxError
    # on Python versions before 3.12 (PEP 701).
    print(f"Name: {data['results'][0]['uid']}")