Vous avez reçu un message "Your GitLab account has been locked ..." ? Pas d'inquiétude : lisez cet article https://docs.gricad-pages.univ-grenoble-alpes.fr/help/unlock/

Commit 5af6249e authored by paugier
Browse files

Version Numba 0.43.1

parent bbb56c4c
#!/usr/bin/env python
import concurrent.futures as futures
import itertools
import logging
# Configure root-logger output once at import time.
# NOTE(review): this call was truncated in the diff view; the closing
# arguments (in particular `level`) are reconstructed — confirm against
# the full file.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO,
)
def profile(func):
    """Decorator that prints the wall-clock duration of each call.

    :param func: the callable to time
    :returns: a wrapper that forwards all arguments, prints the elapsed
        time in seconds, and returns the wrapped function's result
        (the original version discarded it)
    """
    import functools
    import time

    @functools.wraps(func)  # preserve func's name/docstring on the wrapper
    def wrapper(*args, **kwargs):
        # perf_counter is monotonic, unlike time.time (wall clock can jump)
        start = time.perf_counter()
        result = func(*args, **kwargs)
        print(time.perf_counter() - start)
        return result

    return wrapper
def factorize_naive(n):
    """A naive trial-division factorization method.

    :param n: (int) the integer to factorize
    :returns: (list) the prime factors of n in non-decreasing order;
        empty list for n < 2
    """
    logging.debug("starting factorize_naive({})".format(n))
    if n < 2:
        logging.debug("ending factorize_naive({}) = []".format(n))
        return []
    factors = []
    p = 2
    while True:
        if n == 1:
            logging.debug("ending factorize_naive({}) = {}".format(n, factors))
            return factors
        r = n % p
        if r == 0:
            # p divides n: record it and keep dividing (handles repeated
            # factors); the original dropped this append entirely
            factors.append(p)
            # integer division: `/` would silently turn n into a float
            n = n // p
        elif p * p >= n:
            # no divisor <= sqrt(n) exists, so the remaining n is prime
            factors.append(n)
            logging.debug("ending factorize_naive({}) = {}".format(n, factors))
            return factors
        elif p > 2:
            # Advance in steps of 2 over odd numbers
            p += 2
        else:
            # If p == 2, get to 3
            p += 1
def run_func(func, nb_jobs, inputs):
    """Run *func* in parallel processes for each element of *inputs*.

    :param func: (callable) function to call; takes one parameter
    :param nb_jobs: (int) the number of jobs to run in parallel
    :param inputs: iterable over parameters
    :returns: generator of (input, func(input)) pairs, yielded in
        completion order
    """
    # We can use a with statement to ensure worker processes are cleaned up
    with futures.ProcessPoolExecutor(max_workers=nb_jobs) as executor:
        # submit everything first so all jobs run concurrently
        future_to_data = {executor.submit(func, data): data for data in inputs}
        logging.debug("end of submit")
        # Yield each result exactly once, as soon as it completes.
        # The original had two loops (plain dict iteration followed by
        # as_completed) and therefore yielded every result twice.
        for fut in futures.as_completed(future_to_data):
            yield future_to_data[fut], fut.result()
def main():
    """Factorize a batch of integers in parallel and log each result."""
    numbers = itertools.chain(range(100), range(200000, 12000000, 100))
    results = run_func(factorize_naive, 4, numbers)
    for number, factors in results:
        logging.info("f({}) = {}".format(number, factors))
if __name__ == "__main__":
import multiprocessing as mp
import sys
import numpy as np
def is_prime(number):
    """Return True if number is prime, else False.

    :number: (int) a positive number (no check; like the original, any
        value below 2 yields True because the loop body never runs)
    :returns: (bool) True if number is prime, False else
    """
    # A composite number must have a divisor <= sqrt(number), so testing
    # up to sqrt is enough — O(sqrt(n)) instead of the original O(n).
    for candidate in range(2, int(number ** 0.5) + 1):
        if number % candidate == 0:
            return False
    return True
def first_primes(nb_to_check, nb_proc=None):
    """Return the primes below *nb_to_check*, testing candidates in parallel.

    :param nb_to_check: (int) exclusive upper bound on the numbers tested
    :param nb_proc: (int) number of parallel jobs (default: nb procs available)
    :returns: (list) the primes p with 2 <= p < nb_to_check
    """
    candidates = range(2, nb_to_check)
    with mp.Pool(nb_proc) as pool:
        # flags[i] is the primality of i; 0 and 1 are not prime
        flags = [False, False] + pool.map(is_prime, candidates)
    return [number for number, prime in enumerate(flags) if prime]
def main(argv):
    """Command-line entry point: print the primes found below argv[1].

    :param argv: (list) command-line arguments (argv[1] = bound,
        optional argv[2] = number of worker processes)
    """
    if len(argv) < 2:
        print(f"usage: {argv[0]} nb_prime_to_check [nb_procs=4]")
        # the original fell through here and crashed on argv[1]
        return
    if len(argv) < 3:
        nb_proc = mp.cpu_count()
    else:
        # the original unconditionally overwrote nb_proc with argv[2]
        # even when it was absent (missing else)
        nb_proc = int(argv[2])
    nb_to_check = int(argv[1])
    print(f"looking for the {nb_to_check} primes number using {nb_proc} procs")
    # the original forgot to forward nb_proc, silently using all CPUs
    primes = first_primes(nb_to_check, nb_proc)
    print(f"first {nb_to_check} primes: {primes}")
if __name__ == '__main__':
import multiprocessing as mp
import sys
import numpy as np
def distances(item, collection, dist):
    """Return the list of distances between item and elements of collection,
    using dist as distance measure.

    Complexity = O(|collection| * O(dist))

    :item: an element to look for
    :collection: (iterable) collection of elements that are comparable with item
    :dist: function that takes two items and return the distance between them
    :returns: (list) distances between item and each elem of collection
    """
    return list(map(lambda other: dist(item, other), collection))
def abs_dist(x, y):
    """Return the absolute value of the difference of the inputs."""
    return max(x - y, y - x)
def compute_abs_dist(col, nb_proc):
    """Computes the distance matrix between elements in collection.

    :col: (sequence) the set of elements to consider (must support len())
    :nb_proc: the number of parallel jobs to run
    :returns: (np.ndarray) square matrix D with D[i, j] = |col[i] - col[j]|
    """
    # one task per element: compare col[x] against the whole collection
    inputs = [(x, col, abs_dist) for x in col]
    collection_size = len(col)
    with mp.Pool(nb_proc) as pool:
        # starmap returns the rows sorted in the same order as `inputs`
        rows = pool.starmap(distances, inputs)
    # Build the array directly from the row lists instead of copying them
    # one by one into a preallocated np.empty; the reshape keeps the
    # (0, 0) shape of the original for an empty collection.
    return np.array(rows, dtype=float).reshape(collection_size, collection_size)
def main(argv):
    """Command-line entry point: compute and build the pairwise distance
    matrix of a random collection.

    :param argv: (list) command-line arguments (argv[1] = collection size,
        optional argv[2] = number of worker processes)
    """
    if len(argv) < 2:
        print(f"usage: {argv[0]} collection_size [nb_procs]")
        # the original fell through here and crashed on argv[1]
        return
    if len(argv) < 3:
        nb_proc = mp.cpu_count()
    else:
        # the original unconditionally overwrote nb_proc with argv[2]
        # even when it was absent (missing else)
        nb_proc = int(argv[2])
    collection_size = int(argv[1])
    col = np.random.random(collection_size)
    print(f"collection: {col}")
    res = compute_abs_dist(col, nb_proc)
    # NOTE(review): import kept from the original; the plotting code that
    # presumably used plt (and res) is not visible in this view — confirm
    # against the full file.
    import matplotlib.pyplot as plt
if __name__ == '__main__':
......@@ -18,7 +18,7 @@ pyviz
# accelerators
......@@ -4,8 +4,6 @@
- [Franck] Split 21_pres_algos_dtw_cort.ipynb in more than one page!
- [Pierre] Complete 22_profiling.ipynb
- [Franck, Loic] 30_wrapping.ipynb (que retenir des exemples?)
- [Pierre, Franck] 31_accelerators.ipynb (que retenir des exemples?)
......@@ -14,8 +12,6 @@
- [Pierre, Franck] parallel (presentation + examples)
- [Cyrille] dtw_cort_dist avec MPI (done!!! To validate)
- [Franck, Raphaël, Cyrille] présentation 41_cluster.ipynb (Charliecloud pour
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment