A script that replicates all examples in my blog post on inferring probabilities using a Beta prior — see the Bayes post for more information.
$ python ex003_bayes.py| from bs4 import BeautifulSoup,SoupStrainer | |
| import urllib.request | |
| import colorama,re,queue,threading | |
| from colorama import Fore | |
| from urllib.parse import * | |
| class check_link(): | |
| def __init__(self,address): | |
| self.address=address | |
| def check(self,address): |
| #!/usr/bin/env python | |
| """ | |
| Simple Indexer | |
| ================================= | |
| Author: Jon Hurlock, October 2011 | |
| This script basically crawls a domain (not just a page) and | |
| then extracts all links <a href=""></a>, and finds all links | |
| on that domain; it is also able to extract different file types |
A script that replicates all examples in my blog post on inferring probabilities using a Beta prior — see the Bayes post for more information.
$ python ex003_bayes.py| import heapq | |
| from collections import defaultdict | |
class Graph:
    """Directed graph stored as adjacency lists over integer vertices.

    Looks like a container for a shortest-path routine (e.g. Dijkstra):
    `edges` maps a vertex to its neighbor list and `distances` is
    presumably keyed by (vertex, neighbor) pairs -- confirm against the
    algorithm that consumes it.
    """

    def __init__(self, n):
        """Create a graph whose vertex set is the integers 0 .. n-1."""
        self.nodes = {vertex for vertex in range(n)}  # all vertex ids
        self.edges = defaultdict(list)                # vertex -> [neighbors]
        self.distances = {}                           # edge -> weight
| from queue import Queue, PriorityQueue | |
def bfs(graph, start, end):
    """
    Breadth-first search for a shortest (fewest-edges) path in a graph.

    The original was an empty stub whose docstring mislabeled the
    algorithm as DFS; this implements the BFS the signature promises.

    :param graph: mapping of node -> iterable of neighbor nodes
    :param start: node to start BFS from
    :param end: goal node
    :return: list of nodes from ``start`` to ``end`` inclusive, or
             ``None`` when ``end`` is unreachable from ``start``
    """
    if start == end:
        return [start]
    visited = {start}              # nodes already enqueued, to avoid cycles
    frontier = Queue()             # FIFO of partial paths; Queue per file imports
    frontier.put([start])
    while not frontier.empty():
        path = frontier.get()
        for neighbor in graph[path[-1]]:
            if neighbor in visited:
                continue
            if neighbor == end:
                return path + [neighbor]
            visited.add(neighbor)
            frontier.put(path + [neighbor])
    return None
def bfs(graph, start):
    """Return the set of every node reachable from ``start``.

    ``graph`` must map each node to a *set* of neighbors (the set
    difference below relies on it). Traversal is breadth-first, but only
    the visited set -- not the order -- is observable to the caller.
    """
    seen = set()
    pending = [start]  # FIFO frontier of nodes awaiting a visit
    while pending:
        node = pending.pop(0)
        if node in seen:
            continue  # already expanded via an earlier path
        seen.add(node)
        # Only enqueue neighbors not yet visited.
        pending.extend(graph[node] - seen)
    return seen
class Node:
    """A single node of a doubly linked list."""

    def __init__(self, data):
        # Payload carried by this node.
        self.data = data
        # Neighbor links; a freshly built node is detached on both sides.
        self.next = self.prev = None
| class LinkedList : | |
| def __init__( self ) : | |
| self.head = None |