Skip to content

Instantly share code, notes, and snippets.

@kamwoh
Created June 15, 2021 05:57
Show Gist options
  • Select an option

  • Save kamwoh/6a212723713e796c6679d59e94dde18f to your computer and use it in GitHub Desktop.

Select an option

Save kamwoh/6a212723713e796c6679d59e94dde18f to your computer and use it in GitHub Desktop.
A Python script that searches Google Scholar for publication records based on a list of publication names.
from scholarly import scholarly
# Publication titles to look up on Google Scholar, one title per line.
# NOTE: titles were pasted from papers/PDFs; keep them free of hyphenation
# artifacts (e.g. "Watermark- ing") or the search quality degrades.
listpubs = """
Embedding watermarks into deep neural networks
Turning your weakness into a strength: Watermarking deep neural networks by backdooring
DeepMarks: A Digital Fingerprinting Framework for Deep Neural Networks
Protecting intellectual property of deep neural networks with watermarking
DeepSigns: A Generic Watermarking Framework for IP Protection of Deep Learning Models
Adversarial Frontier Stitching for Remote Neural Network Watermarking
Watermarking deep neural networks for embedded systems
Zero-knowledge watermark detection resistant to ambiguity attacks
Combatting ambiguity attacks via selective detection of embedded watermarks
Passport-aware normalization for deep model protection
Robust image watermarking based on multiscale gradient direction quantization
Screen-shooting resilient watermarking
An overview of digital video watermarking
Training dnn model with secret key for model protection
Keynet: An asymmetric key-style framework for watermarking deep learning models
A survey on model watermarking neural networks
Identity Bracelets for deep neural network
""".strip().split('\n')
# Query Google Scholar for every title and keep only the top hit of each
# search, paired with the title it was searched under.
res = []
for title in listpubs:
    print(f'Searching: {title}')
    hits = scholarly.search_pubs(title)
    # search_pubs returns a generator; take the first (best-ranked) match.
    top_hit = next(hits)
    res.append({'pub': title, 'res': top_hit})
    print(top_hit)
def get_scholar_url(author_id):
    """Build the Google Scholar profile URL for an author id.

    Parameters
    ----------
    author_id : str or None
        Author id as reported by `scholarly`; '' (or None) when the
        author has no Scholar profile.

    Returns
    -------
    str
        The profile URL, or the sentinel string 'no scholar url' when
        no id is available.
    """
    # Falsy check covers both '' (the documented "no profile" value in
    # this script) and None, instead of only the exact empty string.
    if not author_id:
        return 'no scholar url'
    return f'https://scholar.google.com/citations?user={author_id}&hl=en'
# Flatten each Scholar result into a summary dict (df) and a raw CSV
# line (lines); track the widest author list to pad the CSV header.
df = []
lines = []
maxlen_authors = 0
for query in res:
    # Every record stores the title it was searched under, so iterate
    # the results directly instead of zip(listpubs, res) -- identical
    # pairing, but it stays correct even if res is shorter than listpubs.
    pub = query['pub']
    print(pub)
    hit = query['res']
    bib = hit['bib']
    authors = bib['author']
    author_ids = hit['author_id']
    # Optional fields: scholarly omits some keys (notably eprint_url)
    # for many records -- default to '' / 0 instead of raising KeyError.
    venue = bib.get('venue', '')
    year = bib.get('pub_year', '')
    citations = hit.get('num_citations', 0)
    pub_url = hit.get('pub_url', '')
    eprint_url = hit.get('eprint_url', '')
    authors_summary = []
    author_infos = []
    # zip truncates to the shorter list if author ids are missing.
    for author, author_id in zip(authors, author_ids):
        scholar_url = get_scholar_url(author_id)
        authors_summary.append({
            'author': author,
            'author_id': author_id,
            'scholar_url': scholar_url
        })
        author_infos.append(f'{author},{scholar_url}')
    author_infos = ','.join(author_infos)
    maxlen_authors = max(maxlen_authors, len(authors))
    df.append({
        'pub': pub,
        'venue': venue,
        'year': year,
        'num_cites': citations,
        'pub_url': pub_url,
        'eprint_url': eprint_url
    })
    # NOTE(review): fields are joined with bare commas, so a comma inside
    # a title/venue shifts the columns; the stdlib csv module would quote
    # these properly. Left as-is to keep the output format unchanged.
    lines.append(f'{pub},{venue},{year},{citations},{pub_url},{eprint_url},{author_infos}')
    print(lines[-1])
# Pad the header with one empty column per author field: each author
# contributes two columns (name, scholar url), hence 2 commas apiece.
commas = ',' * maxlen_authors * 2
# 'w' truncates and writes; the previous 'w+' read/write mode was
# unnecessary. Explicit UTF-8 keeps non-ASCII author names intact
# regardless of the platform's default encoding.
with open('ipr list.csv', 'w', encoding='utf-8') as f:
    f.write(f'Pub Name,Venue,Year,Citations,Pub Url,Eprint Url{commas}\n')
    # One write per row instead of two separate write() calls.
    f.writelines(line + '\n' for line in lines)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment