
Multi-core Processing #7

Open
sparthir opened this issue Apr 19, 2020 · 2 comments

@sparthir
Contributor

Just came across this article and started to wonder if multiprocessing might be another option to speed up the raytracing in Python.

https://medium.com/@urban_institute/using-multiprocessing-to-make-python-code-faster-23ea5ef996ba

There is an overhead cost, so without experimenting I'm not sure whether it will actually be more efficient, but I believe Python runs on a single core by default (I could be wrong).

However, the raytracer could detect how many cores you have, send out chunks of pixels to be calculated on different cores, and collect the results (a rough sketch is below). You can find the core count with:

import multiprocessing
print(multiprocessing.cpu_count())

Could be interesting to play with to see if it can get even faster. :)
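
Something like this is what I had in mind, just as a rough untested sketch (render_chunk here is a made-up placeholder for the actual per-pixel work, not anything from this repo):

import multiprocessing
import numpy as np

def render_chunk(rows):
    # Placeholder worker: in the real raytracer this would trace the rays
    # for the given block of rows and return their pixel values.
    return np.zeros((len(rows), 400, 3))

if __name__ == '__main__':
    cores = multiprocessing.cpu_count()
    chunks = np.array_split(np.arange(300), cores)   # one block of rows per core
    with multiprocessing.Pool(cores) as pool:
        results = pool.map(render_chunk, chunks)     # scatter, compute, gather
    image = np.concatenate(results)                  # rows come back in order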

@rafael-fuente

rafael-fuente commented Apr 29, 2020

As far as I know, NumPy already uses multithreading internally, so I don't think this can be made faster with multiprocessing.
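
(If you want to check which threaded BLAS library your NumPy build is linked against, something like this should show it:)

import numpy as np
np.show_config()   # prints the BLAS/LAPACK backends NumPy was built with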

Anyway, I made a quick edit of the raytracer to test splitting the pixels across the cores. It runs slower:

from PIL import Image
from functools import reduce
import multiprocessing
import numpy as np
import numbers
import time


def extract(cond, x):
    # Pull out the elements of x selected by the boolean mask cond;
    # plain numbers pass through unchanged.
    if isinstance(x, numbers.Number):
        return x
    else:
        return np.extract(cond, x)

def concatenate(v):
    # Join a list of vec3 chunks back into a single vec3.
    return vec3(np.concatenate([i.x for i in v]),
                np.concatenate([i.y for i in v]),
                np.concatenate([i.z for i in v]))


class vec3():
    def __init__(self, x, y, z):
        (self.x, self.y, self.z) = (x, y, z)
    def __mul__(self, other):
        return vec3(self.x * other, self.y * other, self.z * other)
    def __add__(self, other):
        return vec3(self.x + other.x, self.y + other.y, self.z + other.z)
    def __sub__(self, other):
        return vec3(self.x - other.x, self.y - other.y, self.z - other.z)
    def dot(self, other):
        return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
    def __abs__(self):
        return self.dot(self)
    def norm(self):
        mag = np.sqrt(abs(self))
        return self * (1.0 / np.where(mag == 0, 1, mag))
    def components(self):
        return (self.x, self.y, self.z)
    def extract(self, cond):
        return vec3(extract(cond, self.x),
                    extract(cond, self.y),
                    extract(cond, self.z))
    
    
    def split(self, n):
        # Split this vec3 into n equal chunks, one per worker process.
        return [vec3(x, y, z) for (x, y, z) in
                zip(np.split(self.x, n), np.split(self.y, n), np.split(self.z, n))]

    def place(self, cond):
        r = vec3(np.zeros(cond.shape), np.zeros(cond.shape), np.zeros(cond.shape))
        np.place(r.x, cond, self.x)
        np.place(r.y, cond, self.y)
        np.place(r.z, cond, self.z)
        return r


rgb = vec3  # RGB colors are just 3-component vectors

def multiprocessing_raytrace(O, D, scene, process, return_dict, bounce = 0):
    # O is the ray origin, D is the normalized ray direction
    # scene is a list of Sphere objects (see below)
    # bounce is unused here (kept from the original raytrace signature)
    # This stripped-down version only renders a normalized depth map: the
    # nearest hit distance per pixel, clamped to max_r_distance. Each worker
    # stores its chunk in return_dict under its process index.

    distances = [s.intersect(O, D) for s in scene]
    nearest = reduce(np.minimum, distances)

    max_r_distance = 10
    r_distance = np.where(nearest <= max_r_distance, nearest, max_r_distance)
    norm_r_distance = r_distance / max_r_distance
    return_dict[process] = rgb(norm_r_distance, norm_r_distance, norm_r_distance)


class Sphere:
    def __init__(self, center, r, diffuse, mirror = 0.5):
        self.c = center
        self.r = r
        self.diffuse = diffuse
        self.mirror = mirror

    def intersect(self, O, D):
        # Solve the ray-sphere quadratic; FARAWAY marks rays that miss.
        FARAWAY = 1.0e39
        b = 2 * D.dot(O - self.c)
        c = abs(self.c) + abs(O) - 2 * self.c.dot(O) - (self.r * self.r)
        disc = (b ** 2) - (4 * c)
        sq = np.sqrt(np.maximum(0, disc))
        h0 = (-b - sq) / 2
        h1 = (-b + sq) / 2
        h = np.where((h0 > 0) & (h0 < h1), h0, h1)
        pred = (disc > 0) & (h > 0)
        return np.where(pred, h, FARAWAY)

    def diffusecolor(self, M):
        return self.diffuse

    def light(self, O, D, d, scene, bounce):
        # Full shading (not called by multiprocessing_raytrace above;
        # kept from the original raytracer)
        M = (O + D * d)                         # intersection point
        N = (M - self.c) * (1. / self.r)        # normal
        toL = (L - M).norm()                    # direction to light
        toO = (E - M).norm()                    # direction to ray origin
        nudged = M + N * .0001                  # M nudged to avoid itself

        # Shadow: find if the point is shadowed or not.
        # This amounts to finding out if M can see the light
        light_distances = [s.intersect(nudged, toL) for s in scene]
        light_nearest = reduce(np.minimum, light_distances)
        seelight = light_distances[scene.index(self)] == light_nearest

        # Ambient
        color = rgb(0.05, 0.05, 0.05)

        # Lambert shading (diffuse)
        lv = np.maximum(N.dot(toL), 0)
        color += self.diffusecolor(M) * lv * seelight

        # Reflection (never taken here: bounce < 0 is always false, and the
        # recursive raytrace() from the original code is not defined in this edit)
        if bounce < 0:
            rayD = (D - N * 2 * D.dot(N)).norm()
            color += raytrace(nudged, rayD, scene, bounce + 1) * self.mirror

        # Blinn-Phong shading (specular)
        phong = N.dot((toL + toO).norm())
        color += rgb(1, 1, 1) * np.power(np.clip(phong, 0, 1), 50) * seelight
        return color

class CheckeredSphere(Sphere):
    def diffusecolor(self, M):
        # Checkerboard pattern from the intersection point's x and z coordinates
        checker = ((M.x * 2).astype(int) % 2) == ((M.z * 2).astype(int) % 2)
        return self.diffuse * checker


if __name__ == '__main__':

    scene = [
        Sphere(vec3(.75, .1, 1), .6, rgb(0, 0, 1)),
        Sphere(vec3(-.75, .1, 2.25), .6, rgb(.5, .223, .5)),
        Sphere(vec3(-2.75, .1, 3.5), .6, rgb(1, .572, .184)),
        CheckeredSphere(vec3(0,-99999.5, 0), 99999, rgb(.75, .75, .75), 0.25),
        ]
    
    L = vec3(5, 5, -10)        # Point light position
    E = vec3(0, 0.35, -1)      # Eye position
    (w, h) = (400, 300)        # Screen size
    r = float(w) / h
    # Screen coordinates: x0, y0, x1, y1.
    S = (-1, 1 / r + .25, 1, -1 / r + .25)
    # One (x, y) screen coordinate per pixel, flattened row-major
    x = np.tile(np.linspace(S[0], S[2], w), h)
    y = np.repeat(np.linspace(S[1], S[3], h), w)

    splits = 4                 # number of worker processes

    Q = vec3(x, y, 0)          # point on the screen plane for each pixel
    raydir = (Q - E).norm()    # normalized ray direction per pixel
    raydir_splitted = raydir.split(splits)
    t0 = time.time()


    # A Manager dict collects each worker's chunk of the image
    manager = multiprocessing.Manager()
    return_dict = manager.dict()

    jobs = []
    for i in range(splits):
        p = multiprocessing.Process(target=multiprocessing_raytrace,
                                    args=(E, raydir_splitted[i], scene, i, return_dict))
        jobs.append(p)
        p.start()

    for proc in jobs:
        proc.join()


    # Reassemble the chunks in process order; iterating over the Manager
    # dict's keys directly is not guaranteed to preserve pixel order.
    color = concatenate([return_dict[i] for i in range(splits)])

    print("Took", time.time() - t0)

    channels = [Image.fromarray((255 * np.clip(c, 0, 1).reshape((h, w))).astype(np.uint8), "L")
                for c in color.components()]
    im = Image.merge("RGB", channels)
    im.show()
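
My guess is the overhead comes from spawning the processes and pickling the ray chunks and the scene to each worker, plus serializing the results back through the Manager dict, while the NumPy work per chunk is already vectorized.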

@sparthir
Contributor Author

Ah, good to know. I suspected it would run slower, but I wasn't sure, and I don't have the chops to write it up well.
