dmrids-gd77.py

            #!/usr/bin/env python3
"""Fetches the DMRID database from radioid.net
This script fetches the database of DMRIDs directly from
radioid.net and creates a CSV file to import it into
the GD77 running OpenGD77.
Author: Dominic Reich “OE7DRT”
        <quick.hat4396@qtztsjosmprqmgtunjyf.com>
Usage:
------
    ./dmrids.py
    No arguments are needed. Specify the folders in the
    source below.
"""
import os
from datetime import datetime, timedelta
import requests
import pandas as pd
def main():
    """main function
    Runs if script is run by itself.
    """
    # User configuration --------------------------------
    database_url = 'https://www.radioid.net/static/user.csv'
    local_folder = '/home/dominic/.hamradio/'
    output_filename = 'DMR-IDS-GD77.csv'
    tmpfile = '/tmp/dmrids.download'
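    # DMR IDs are prefixed with the issuing country's ITU Mobile Country Code,
    # e.g. 232 = Austria, 262-264 = Germany, 228 = Switzerland, 222 = Italy.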
    regions = ['232', '262', '263', '264', '228', '222']
    # regions = ['232']
    fav_filename = 'Favorites.txt'
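    # Favorites.txt holds one callsign per line, e.g. (illustrative entries):
    #
    #     OE7DRT
    #     DD7MH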
    # blacklist_filename = 'BlacklistedCalls.txt'
    # End of User configuration -------------------------
    if os.path.isfile(local_folder + output_filename) and os.access(local_folder + output_filename, os.R_OK):
        print("Local file found. It will be overwritten if it is old enough.")
        one_day_ago = datetime.now() - timedelta(days=1)
        filetime = datetime.fromtimestamp(
            os.path.getctime(local_folder + output_filename))
        if not filetime < one_day_ago:
            print('The file is still fairly recent ({}).\n'.format(filetime))
            a = input(
                'Do you really want to fetch the database? (y/n): ')
            if a.lower() != 'y':
                exit(0)
    # Read favorites: one callsign per line.
    favorites = []
    if os.path.isfile(local_folder + fav_filename) and os.access(local_folder + fav_filename, os.R_OK):
        with open(local_folder + fav_filename, 'r') as file:
            for line in file:
                line = line.strip()
                if line:  # skip blank lines; an empty entry would match every callsign below
                    favorites.append(line)
    else:
        # Without the favorites file the pattern built below would be empty
        # and match every record. Proper error handling is still missing, so
        # bail out here for now.
        print('Could not open favorites file.')
        exit(1)

    print('Downloading database (this may take some time)')
    r = requests.get(database_url, timeout=60)  # timeout so a stalled connection cannot hang the script
    if r.status_code == 200:
        with open(tmpfile, "wb") as file:
            file.write(r.content)
    else:
        print('Download failed. Exiting')
        exit(1)
    # Build the region filter. The word-boundary anchors (\b) have to reach the
    # regex engine as literal backslash sequences, hence the raw strings.
    searchstr = r'\b(?:' + '|'.join(regions) + r')\d+\b'
    favsearchstr = '|'.join(favorites)
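    # With the defaults above, searchstr is r'\b(?:232|262|263|264|228|222)\d+\b',
    # and favsearchstr is just the favorite callsigns joined with '|'.
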
    # Open full csv file and create two DataFrames with filtered users and favorites
    csv_full = pd.read_csv(tmpfile, dtype=str, usecols=['RADIO_ID', 'CALLSIGN', 'FIRST_NAME'])
    csv_filt = csv_full[csv_full['RADIO_ID'].str.contains(searchstr)]
    csv_favs = csv_full[csv_full['CALLSIGN'].str.contains(favsearchstr, na=False)]
    # Merge the regional and favorite entries, sort, and remove duplicates.
    csv_fin = pd.concat([csv_filt, csv_favs], ignore_index=True)
    csv_fin.sort_values('RADIO_ID', inplace=True)
    csv_fin.drop_duplicates(subset=['RADIO_ID'], inplace=True)
    # Write the final csv file
    csv_fin.to_csv(local_folder + output_filename, index=False)
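    # The output keeps only the three selected columns, e.g. (header plus one
    # illustrative row -- the radio ID here is made up):
    #
    #     RADIO_ID,CALLSIGN,FIRST_NAME
    #     2327001,OE7DRT,Dominic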
    # print(csv_fin[csv_fin['CALLSIGN'].isin(['DD7MH'])])


if __name__ == "__main__":
    main()