load_ensembl_annotation_PROD.py
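"""Scrape the Ensembl annotation tables published for the GENE-SWitCH,
AQUA-FAANG and BovReg project pages and load each table row as a document
into the `ensembl_annotation` Elasticsearch index.

Connection settings (ES_USER, ES_PASSWORD, ES_NODE) are read from the
environment, e.g. a local .env file or Kubernetes secrets.
"""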
import requests
import bs4
import os
from dotenv import load_dotenv
# connect to our ES cluster
from elasticsearch import Elasticsearch
from elasticsearch import RequestsHttpConnection
# load .env variables
load_dotenv()
# read credentials from the environment (.env locally, Kubernetes secrets in production)
ES_USERNAME = os.getenv('ES_USER')
ES_PASSWORD = os.getenv('ES_PASSWORD')
ES_HOST = os.environ.get("ES_NODE")
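# Example .env for local runs (placeholder values only, adjust to your cluster):
#   ES_USER=elastic
#   ES_PASSWORD=changeme
#   ES_NODE=https://elasticsearch.example.org:9200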
es = Elasticsearch([ES_HOST],
                   connection_class=RequestsHttpConnection,
                   http_auth=(ES_USERNAME, ES_PASSWORD),
                   use_ssl=True, verify_certs=False,
                   ssl_show_warn=False)
def scrape_data(project_name):
    """Scrape the annotation table from the given project's Ensembl page
    and index every row as one document."""
    result = ''
    if project_name == 'GENE-SWitCH':
        result = requests.get('https://projects.ensembl.org/gene-switch/')
    if project_name == 'AQUA-FAANG':
        result = requests.get('https://projects.ensembl.org/aqua-faang/')
    if project_name == 'BovReg':
        result = requests.get('https://projects.ensembl.org/bovreg/')
    if result:
        soup = bs4.BeautifulSoup(result.text, "lxml")
        table = soup.find('table', class_='table_zebra')
        # Collecting data
        for row in table.tbody.find_all('tr'):
            # Find all data for each column
            columns = row.find_all('td')
            if columns:
                species = columns[0].text.strip()
                accession = columns[1].text.strip()
                assembly_submitter = columns[2].text.strip()
                # annotation column
                annotation_list = []
                annotation_spans = columns[3].find_all('span')
                for span in annotation_spans:
                    annotation_list.append({"annotation": span.a.text.strip(),
                                            "fileUrl": span.a['href']})
                # proteins column
                proteins_list = []
                protein_links = columns[4].find_all('a')
                for a in protein_links:
                    proteins_list.append({"fileType": a.text.strip(),
                                          "fileUrl": a['href']})
                # transcripts column
                transcripts_list = []
                transcripts_links = columns[5].find_all('a')
                for a in transcripts_links:
                    transcripts_list.append({"fileType": a.text.strip(),
                                             "fileUrl": a['href']})
                # softmasked genome column
                softmaskedgenome_list = []
                softmaskedgenome_links = columns[6].find_all('a')
                for a in softmaskedgenome_links:
                    softmaskedgenome_list.append({"fileType": a.text.strip(),
                                                  "fileUrl": a['href']})
                # repeat library column
                repeatlibrary_list = []
                repeatlibrary_links = columns[7].find_all('a')
                for a in repeatlibrary_links:
                    repeatlibrary_list.append({"library": a.text.strip(),
                                               "fileUrl": a['href']})
                # other data column
                otherdata_list = []
                otherdata_links = columns[8].find_all('a')
                for a in otherdata_links:
                    otherdata_list.append({"otherData": a.text.strip(),
                                           "fileUrl": a['href']})
                # view in browser column
                browserview_list = []
                browserview_links = columns[9].find_all('a')
                for a in browserview_links:
                    browserview_list.append({"browserView": a.text.strip(),
                                             "fileUrl": a['href']})
                # create a document to upload
                data = {'species': species,
                        'accession': accession,
                        'assembly_submitter': assembly_submitter,
                        'annotation': annotation_list,
                        'proteins': proteins_list,
                        'transcripts': transcripts_list,
                        'softmasked_genome': softmaskedgenome_list,
                        'repeat_library': repeatlibrary_list,
                        'other_data': otherdata_list,
                        'browser_view': browserview_list,
                        'project': project_name}
                # load document in index
                load_ensembl_annotation(data)
def load_ensembl_annotation(data):
    """Index a single annotation document, keyed by species and accession."""
    try:
        # add document to index
        res = es.index(index='ensembl_annotation', doc_type="_doc",
                       id=f"{data['species']}-{data['accession']}", body=data)
        print(f"{data['species']}-{res['result']}")
    except Exception as e:
        print(f"Issue with loading annotation: {e}")
def empty_ensembl_annotation():
    """Delete all existing documents from the ensembl_annotation index."""
    try:
        es.delete_by_query(index='ensembl_annotation', body={"query": {"match_all": {}}})
    except Exception as e:
        print(f"Issue with emptying ensembl_annotation index: {e}")
def main():
    print("koosum")
    print(ES_HOST)
    empty_ensembl_annotation()
    scrape_data('GENE-SWitCH')
    scrape_data('AQUA-FAANG')
    scrape_data('BovReg')
if __name__ == '__main__':
    main()