# 绘制图像作为Voronoi地图

170

• 原始图像及其颜色的近似程度。
• 该算法在不同种类的图像上的效果如何。
• 该算法对小N的效果如何。
• 该算法是否自适应地对需要更多细节的图像区域中的点进行聚类。

## 渲染器

### 相关挑战

5
@frogeyedpeas：通过查看投票来评判。;）这是一次人气竞赛，不一定评出客观上最好的方法。想法是你尽力而为，投票会反映人们是否认同你做得好。诚然，这其中有一定的主观性。看看我链接的相关挑战就会发现：方法通常多种多样，而投票系统能帮助更好的解决方案脱颖而出并决定获胜者。

3

Alex A.

3
@AlexA。德文（Devon）批准了到目前为止提交的部分脸部近似图像。他不是n = 100版本的

1
@Geobits：他长大后会明白的。
Alex A.

1

112

# Python + scipy + scikit-image，加权泊松圆盘采样

``````import math
import random
import collections
import os
import sys
import functools
import operator as op
import numpy as np
import warnings

from scipy.spatial import cKDTree as KDTree
from skimage.filters.rank import entropy
from skimage.morphology import disk, dilation
from skimage.util import img_as_ubyte
from skimage.io import imread, imsave
from skimage.color import rgb2gray, rgb2lab, lab2rgb
from skimage.filters import sobel, gaussian_filter
from skimage.restoration import denoise_bilateral
from skimage.transform import downscale_local_mean

# Returns a random real number in the half-open range [0, x).
# random.uniform(0, x) may return x itself because of float rounding,
# so redraw until the result is strictly less than x.
def rand(x):
    r = x
    while r == x:
        r = random.uniform(0, x)
    return r

def poisson_disc(img, n, k=30):
    """Weighted Poisson disc sampling over ``img``.

    Places roughly ``n`` points, packed more densely where the image has more
    detail (high local entropy and strong color edges).  ``k`` is the number of
    candidate points tried around each active point before it is retired
    (Bridson's parameter).  Returns a list of (row, col) points.
    """
    h, w = img.shape[:2]

    # Smooth noise away before measuring detail.
    nimg = denoise_bilateral(img, sigma_range=0.15, sigma_spatial=15)
    img_gray = rgb2gray(nimg)
    img_lab = rgb2lab(nimg)

    # Local entropy of the grayscale image, normalized and dilated/blurred
    # so detailed regions attract points slightly beyond their borders.
    entropy_weight = 2**(entropy(img_as_ubyte(img_gray), disk(15)))
    entropy_weight /= np.amax(entropy_weight)
    entropy_weight = gaussian_filter(dilation(entropy_weight, disk(15)), 5)

    # Edge strength from the a/b (color-opponent) channels of CIELAB.
    color = [sobel(img_lab[:, :, channel])**2 for channel in range(1, 3)]
    edge_weight = functools.reduce(op.add, color) ** (1/2) / 75
    edge_weight = dilation(edge_weight, disk(5))

    # Combined weight map, normalized to mean 1.
    weight = (0.3*entropy_weight + 0.7*edge_weight)
    weight /= np.mean(weight)

    max_dist = min(h, w) / 4
    # Average disc spacing that yields roughly n discs over the image area.
    avg_dist = math.sqrt(w * h / (n * math.pi * 0.5) ** (1.05))
    min_dist = avg_dist / 4

    # Per-pixel minimum distance: higher weight -> smaller disc -> denser points.
    dists = np.clip(avg_dist / weight, min_dist, max_dist)

    def gen_rand_point_around(point):
        # Candidate at a random angle, between the local disc radius and max_dist.
        radius = random.uniform(dists[point], max_dist)
        angle = rand(2 * math.pi)
        offset = np.array([radius * math.sin(angle), radius * math.cos(angle)])
        return tuple(point + offset)

    def has_neighbours(point):
        # NOTE(review): dists[point] indexes with a float tuple, which relies
        # on old NumPy accepting float indices — confirm on the target version.
        point_dist = dists[point]
        distances, idxs = tree.query(point,
                                     len(sample_points) + 1,
                                     distance_upper_bound=max_dist)

        if len(distances) == 0:
            return True

        for dist, idx in zip(distances, idxs):
            if np.isinf(dist):
                break

            # Too close by both points' local radii -> candidate is rejected.
            if dist < point_dist and dist < dists[tuple(tree.data[idx])]:
                return True

        return False

    # Generate first point randomly.
    first_point = (rand(h), rand(w))
    to_process = [first_point]
    sample_points = [first_point]
    tree = KDTree(sample_points)

    while to_process:
        # Pop a random point.
        point = to_process.pop(random.randrange(len(to_process)))

        for _ in range(k):
            new_point = gen_rand_point_around(point)

            if (0 <= new_point[0] < h and 0 <= new_point[1] < w
                    and not has_neighbours(new_point)):
                to_process.append(new_point)
                sample_points.append(new_point)
                tree = KDTree(sample_points)
                if len(sample_points) % 1000 == 0:
                    print("Generated {} points.".format(len(sample_points)))

    print("Generated {} points.".format(len(sample_points)))

    return sample_points

def sample_colors(img, sample_points, n):
    """Average the image color (CIELAB) over each sample point's Voronoi cell,
    then merge the closest pairs of samples until at most ``n`` remain.

    Returns a list of (x, y, r, g, b) tuples with r/g/b in [0, 1].
    """
    h, w = img.shape[:2]

    print("Sampling colors...")
    tree = KDTree(np.array(sample_points))
    color_samples = collections.defaultdict(list)
    img_lab = rgb2lab(img)
    xx, yy = np.meshgrid(np.arange(h), np.arange(w))
    pixel_coords = np.c_[xx.ravel(), yy.ravel()]
    nearest = tree.query(pixel_coords)[1]

    # Group every pixel's LAB color under its nearest sample point.
    i = 0
    for pixel_coord in pixel_coords:
        color_samples[tuple(tree.data[nearest[i]])].append(
            img_lab[tuple(pixel_coord)])
        i += 1

    print("Computing color means...")
    samples = []
    for point, colors in color_samples.items():
        avg_color = np.sum(colors, axis=0) / len(colors)
        samples.append(np.append(point, avg_color))

    if len(samples) > n:
        print("Downsampling {} to {} points...".format(len(samples), n))

    # Repeatedly merge the closest pair of samples (position and color
    # averaged together) until only n samples are left.
    while len(samples) > n:
        tree = KDTree(np.array(samples))
        dists, neighbours = tree.query(np.array(samples), 2)
        dists = dists[:, 1]
        worst_idx = min(range(len(samples)), key=lambda i: dists[i])
        samples[neighbours[worst_idx][1]] += samples[neighbours[worst_idx][0]]
        samples[neighbours[worst_idx][1]] /= 2
        samples.pop(neighbours[worst_idx][0])

    # Convert back to RGB, flipping (row, col) into (x, y) order.
    color_samples = []
    for sample in samples:
        color = lab2rgb([[sample[2:]]])[0][0]
        color_samples.append(tuple(sample[:2][::-1]) + tuple(color))

    return color_samples

def render(img, color_samples):
    """Rasterize ``color_samples`` as a Voronoi image at double resolution and
    downscale by block averaging (cheap antialiasing).

    Returns an array with the same height/width as ``img``.
    """
    print("Rendering...")
    h, w = [2*x for x in img.shape[:2]]
    xx, yy = np.meshgrid(np.arange(h), np.arange(w))
    pixel_coords = np.c_[xx.ravel(), yy.ravel()]

    # Scatter each sample's color at its (doubled) coordinate.
    colors = np.empty([h, w, 3])
    coords = []
    for color_sample in color_samples:
        coord = tuple(x*2 for x in color_sample[:2][::-1])
        colors[coord] = color_sample[2:]
        coords.append(coord)

    # Nearest-sample lookup for every output pixel.
    tree = KDTree(coords)
    idxs = tree.query(pixel_coords)[1]
    data = colors[tuple(tree.data[idxs].astype(int).T)].reshape((w, h, 3))
    data = np.transpose(data, (1, 0, 2))

    # Average 2x2 blocks back down to the original resolution.
    return downscale_local_mean(data, (2, 2, 1))

if __name__ == "__main__":
    warnings.simplefilter("ignore")

    # Keep only the RGB channels (drops an alpha channel if present).
    img = imread(sys.argv[1])[:, :, :3]

    # poisson_disc over/undershoots its target count; estimate a correction
    # factor from a 500-point dry run, with 2% headroom.
    print("Calibrating...")
    mult = 1.02 * 500 / len(poisson_disc(img, 500))

    for n in (100, 300, 1000, 3000):
        print("Sampling {} for size {}.".format(sys.argv[1], n))

        sample_points = poisson_disc(img, mult * n)
        samples = sample_colors(img, sample_points, n)
        base = os.path.basename(sys.argv[1])
        with open("{}-{}.txt".format(os.path.splitext(base)[0], n), "w") as f:
            for sample in samples:
                f.write(" ".join("{:.3f}".format(x) for x in sample) + "\n")

        imsave("autorenders/{}-{}.png".format(os.path.splitext(base)[0], n),
               render(img, samples))

    print("Done!")
``````

# 图片

2

BobTheAwesome 2015年

3

LKlevin

@LKlevin您使用了什么权重？
orlp

LKlevin

65

# C ++

## 代码

``````#include <cstdlib>
#include <cmath>
#include <string>
#include <vector>
#include <fstream>
#include <istream>
#include <ostream>
#include <iostream>
#include <algorithm>
#include <random>

// Tunables: decimation subsamples the target image during scoring,
// candidates is the number of trial moves per optimization step, and
// termination is the number of steps allowed without improvement.
static auto const decimation = 2;
static auto const candidates = 96;
static auto const termination = 200;

using namespace std;

struct rgb {float red, green, blue;};                  // channels in [0, 1]
struct img {int width, height; vector<rgb> pixels;};   // row-major pixel grid
struct site {float x, y; rgb color;};                  // Voronoi site + its color

// Read a binary PPM (magic "P6") image; returns a {0, 0, {}} img on failure.
img read(string const &name) {
    ifstream file{name, ios::in | ios::binary};
    auto result = img{0, 0, {}};
    if (file.get() != 'P' || file.get() != '6')
        return result;
    // Skip whitespace and '#' comment lines between header fields.
    auto skip = [&](){
        while (file.peek() < '0' || '9' < file.peek())
            if (file.get() == '#')
                while (file.peek() != '\r' && file.peek() != '\n')
                    file.get();
    };
    auto maximum = 0;
    skip(); file >> result.width;
    skip(); file >> result.height;
    skip(); file >> maximum;
    file.get();  // consume the single whitespace byte before the pixel data
    for (auto pixel = 0; pixel < result.width * result.height; ++pixel) {
        auto red = file.get() * 1.0f / maximum;
        auto green = file.get() * 1.0f / maximum;
        auto blue = file.get() * 1.0f / maximum;
        result.pixels.emplace_back(rgb{red, green, blue});
    }
    return result;
}

// Score a site set against the target image.  Each decimated pixel is
// assigned to its nearest site; per-site mean color and per-channel
// variance are accumulated incrementally (Welford-style updates).
// Sites that received no pixels take the color of the pixel under them.
// Returns the PSNR (dB) of the approximation; sites' colors are updated
// in place as a side effect.
float evaluate(img const &target, vector<site> &sites) {
    auto counts = vector<int>(sites.size());
    auto variance = vector<rgb>(sites.size());
    for (auto &site : sites)
        site.color = rgb{0.0f, 0.0f, 0.0f};
    for (auto y = 0; y < target.height; y += decimation)
        for (auto x = 0; x < target.width; x += decimation) {
            // Brute-force nearest-site search.
            auto best = 0;
            auto closest = 1.0e30f;
            for (auto index = 0; index < sites.size(); ++index) {
                float distance = ((x - sites[index].x) * (x - sites[index].x) +
                                  (y - sites[index].y) * (y - sites[index].y));
                if (distance < closest) {
                    best = index;
                    closest = distance;
                }
            }
            ++counts[best];
            auto &pixel = target.pixels[y * target.width + x];
            auto &color = sites[best].color;
            // Incremental mean update, then running sum of squared residuals.
            rgb delta = {pixel.red - color.red,
                         pixel.green - color.green,
                         pixel.blue - color.blue};
            color.red += delta.red / counts[best];
            color.green += delta.green / counts[best];
            color.blue += delta.blue / counts[best];
            variance[best].red += delta.red * (pixel.red - color.red);
            variance[best].green += delta.green * (pixel.green - color.green);
            variance[best].blue += delta.blue * (pixel.blue - color.blue);
        }
    auto error = 0.0f;
    auto count = 0;
    for (auto index = 0; index < sites.size(); ++index) {
        if (!counts[index]) {
            // Empty cell: sample the image directly under the (clamped) site.
            auto x = min(max(static_cast<int>(sites[index].x), 0), target.width - 1);
            auto y = min(max(static_cast<int>(sites[index].y), 0), target.height - 1);
            sites[index].color = target.pixels[y * target.width + x];
        }
        count += counts[index];
        error += variance[index].red + variance[index].green + variance[index].blue;
    }
    // PSNR with signal peak 1.0: 10*log10(samples*3 / SSE).
    return 10.0f * log10f(count * 3 / error);
}

// Write sites as text: "<width> <height>" then one "x y r g b" line per site.
void write(string const &name, int const width, int const height, vector<site> const &sites) {
    ofstream file{name, ios::out};
    file << width << " " << height << endl;
    for (auto const &site : sites)
        file << site.x << " " << site.y << " "
             << site.color.red << " "<< site.color.green << " "<< site.color.blue << endl;
}

// Stochastic hill climbing.  Each step perturbs one site (chosen round-robin)
// in `candidates` random trials, keeps the best-scoring trial, and rewrites
// the output file whenever the global best PSNR improves.  Terminates after
// `termination` consecutive steps without improvement.
// Usage: voronoi <input.ppm> <site-count> <output.txt>
int main(int argc, char **argv) {
    auto rng = mt19937{random_device{}()};
    auto uniform = uniform_real_distribution<float>{0.0f, 1.0f};
    auto target = read(argv[1]);
    auto sites = vector<site>{};
    // Random initial sites; color is value-initialized by the aggregate.
    for (auto point = atoi(argv[2]); point; --point)
        sites.emplace_back(site{
            target.width * uniform(rng),
            target.height * uniform(rng)});
    auto greatest = 0.0f;
    auto remaining = termination;
    for (auto step = 0; remaining; ++step, --remaining) {
        auto best_candidate = sites;
        auto best_psnr = 0.0f;
        #pragma omp parallel for
        for (auto candidate = 0; candidate < candidates; ++candidate) {
            auto trial = sites;
            // rng is shared across OpenMP threads, so guard the draws.
            #pragma omp critical
            {
                // Allow sites slightly outside the image ([-0.1, 1.1] range).
                trial[step % sites.size()].x = target.width * (uniform(rng) * 1.2f - 0.1f);
                trial[step % sites.size()].y = target.height * (uniform(rng) * 1.2f - 0.1f);
            }
            auto psnr = evaluate(target, trial);
            #pragma omp critical
            if (psnr > best_psnr) {
                best_candidate = trial;
                best_psnr = psnr;
            }
        }
        sites = best_candidate;
        if (best_psnr > greatest) {
            greatest = best_psnr;
            remaining = termination;  // reset the no-improvement countdown
            write(argv[3], target.width, target.height, sites);
        }
        cout << "Step " << step << "/" << remaining
             << ", PSNR = " << best_psnr << endl;
    }
    return 0;
}
``````

## 运行

``````g++ -std=c++11 -fopenmp -O3 -o voronoi voronoi.cpp
``````

``````./voronoi cornell.ppm 1000 cornell-1000.txt
``````

## 图片

`N` = 100、300、1000和3000：

1

orlp 2015年

1
@orlp-谢谢！公平地说，您发布的时间更快，并且运行速度更快。速度很重要！
Boojum 2015年

1

orlp 2015年

55

# IDL，自适应细化

``````function subdivide, image, bounds, vars
;subdivide a section into 4, and return the 4 subdivisions and the variance of each
;bounds = [x0, y0, x1, y1]; the (~(n mod 2)) terms shave one pixel off
;even-sized spans so neighbouring quadrants do not overlap
division = list()
vars = list()
nx = bounds[2] - bounds[0]
ny = bounds[3] - bounds[1]
for i=0,1 do begin
for j=0,1 do begin
x = i * nx/2 + bounds[0]
y = j * ny/2 + bounds[1]
sub = image[x:x+nx/2-(~(nx mod 2)),y:y+ny/2-(~(ny mod 2))]
division.add, [x,y,x+nx/2-(~(nx mod 2)),y+ny/2-(~(ny mod 2))]
;weight variance by pixel count so large busy quadrants are refined first
vars.add, variance(sub) * n_elements(sub)
endfor
endfor
return, division
end

pro voro_map, n, image, outfile
;render `image` as an n-site voronoi map and save it to outfile
sz = size(image, /dim)
;first, convert image to greyscale, and then use a Prewitt filter to pick out edges
edges = prewitt(reform(ct_luminance(image[0,*,*], image[1,*,*], image[2,*,*])))
;next, iteratively subdivide the image into sections, using variance to pick
;the next subdivision target (variance -> detail) until we've hit N subdivisions
subdivisions = subdivide(edges, [0,0,sz[1],sz[2]], variances)
while subdivisions.count() lt (n - 2) do begin
!null = max(variances.toarray(),target)
oldsub = subdivisions.remove(target)
newsub = subdivide(edges, oldsub, vars)
;sanity check: halt if a freshly created quadrant duplicates an existing one
if subdivisions.count(newsub[0]) gt 0 or subdivisions.count(newsub[1]) gt 0 or subdivisions.count(newsub[2]) gt 0 or subdivisions.count(newsub[3]) gt 0 then stop
subdivisions += newsub
variances.remove, target
variances += vars
endwhile
;now we find the minimum edge value of each subdivision (we want to pick representative
;colors, not edge colors) and use that as the site (with associated color)
sites = fltarr(2,n)
colors = lonarr(n)
foreach sub, subdivisions, i do begin
slice = edges[sub[0]:sub[2],sub[1]:sub[3]]
!null = min(slice,target)
sxy = array_indices(slice, target) + sub[0:1]
sites[*,i] = sxy
colors[i] = cgcolor24(image[0:2,sxy[0],sxy[1]])
endforeach
;finally, generate the voronoi map
;draw into the off-screen Z buffer at the image's own resolution
old = !d.NAME
set_plot, 'Z'
device, set_resolution=sz[1:2], decomposed=1, set_pixel_depth=24
triangulate, sites[0,*], sites[1,*], tr, connectivity=C
for i=0,n-1 do begin
;skip sites with no voronoi connectivity entries
if C[i] eq C[i+1] then continue
voronoi, sites[0,*], sites[1,*], i, C, xp, yp
cgpolygon, xp, yp, color=colors[i], /fill, /device
endfor
!null = cgsnapshot(file=outfile, /nodialog)
set_plot, old
end

pro wrapper
;batch driver: render every image in ~/voronoi at n = 100, 500 and 1000
cd, '~/voronoi'
fs = file_search()
foreach f,fs do begin
base = strsplit(f,'.',/extract)
;PNG files load via read_png; anything else is assumed to be JPEG
if base[1] eq 'png' then im = read_png(f) else read_jpeg, f, im
voro_map,100, im, base[0]+'100.png'
voro_map,500, im, base[0]+'500.png'
voro_map,1000,im, base[0]+'1000.png'
endforeach
end
``````

`n = 100`

`n = 500`

`n = 1000`

9

sirpercival，2015年

3

BrainSteel 2015年

2
@BrainSteel我想这些轮廓会被当作高差异区域而不必要地集中在其他区域，因此其他真正的高细节区域会因此分配较少的点。
doppelgreener 2015年

@BrainSteel我认为doppel是正确的-黑色边框和白色背景之间有很强的边缘，这需要算法中的很多细节。我不知道，如果这是我能够（或者，更重要的是，应该）修复...
sirpercival

47

# Python 3 + PIL + SciPy，模糊k均值

``````from collections import defaultdict
import itertools
import random
import time

from PIL import Image
import numpy as np
from scipy.spatial import KDTree, Delaunay

INFILE = "planet.jpg"            # input image
OUTFILE = "voronoi.txt"          # output site list
N = 3000                         # number of Voronoi sites / clusters

DEBUG = True # Outputs extra images to see what's happening
FEATURE_FILE = "features.png"    # debug: edge-score visualization
SAMPLE_FILE = "samples.png"      # debug: sample/cluster visualization
SAMPLE_POINTS = 20000            # pixels sampled for clustering
ITERATIONS = 10                  # k-means iterations
CLOSE_COLOR_THRESHOLD = 15       # LAB distance below which colors count as "close"

"""
Color conversion functions
"""

start_time = time.time()

# http://www.easyrgb.com/?X=MATH
def rgb2xyz(rgb):
    """Convert an sRGB triple (0-255 per channel) to CIE XYZ (0-100 scale)."""
    r, g, b = rgb
    r /= 255
    g /= 255
    b /= 255

    # Inverse sRGB gamma companding.
    r = ((r + 0.055)/1.055)**2.4 if r > 0.04045 else r/12.92
    g = ((g + 0.055)/1.055)**2.4 if g > 0.04045 else g/12.92
    b = ((b + 0.055)/1.055)**2.4 if b > 0.04045 else b/12.92

    r *= 100
    g *= 100
    b *= 100

    # Linear RGB -> XYZ matrix (sRGB primaries).
    x = r*0.4124 + g*0.3576 + b*0.1805
    y = r*0.2126 + g*0.7152 + b*0.0722
    z = r*0.0193 + g*0.1192 + b*0.9505

    return (x, y, z)

def xyz2lab(xyz):
    """Convert CIE XYZ (0-100 scale) to CIELAB, using the D65 reference
    white (95.047, 100, 108.883)."""
    x, y, z = xyz
    x /= 95.047
    y /= 100
    z /= 108.883

    # Cube root above the CIE epsilon, linear segment below it.
    x = x**(1/3) if x > 0.008856 else 7.787*x + 16/116
    y = y**(1/3) if y > 0.008856 else 7.787*y + 16/116
    z = z**(1/3) if z > 0.008856 else 7.787*z + 16/116

    L = 116*y - 16
    a = 500*(x - y)
    b = 200*(y - z)

    return (L, a, b)

def rgb2lab(rgb):
    """Convert an sRGB triple (0-255) to CIELAB via XYZ."""
    return xyz2lab(rgb2xyz(rgb))

def lab2xyz(lab):
    """Convert CIELAB to CIE XYZ (0-100 scale, D65 reference white)."""
    L, a, b = lab
    y = (L + 16)/116
    x = a/500 + y
    z = y - b/200

    # Invert the cube-root companding (linear segment near black).
    y = y**3 if y**3 > 0.008856 else (y - 16/116)/7.787
    x = x**3 if x**3 > 0.008856 else (x - 16/116)/7.787
    z = z**3 if z**3 > 0.008856 else (z - 16/116)/7.787

    x *= 95.047
    y *= 100
    z *= 108.883

    return (x, y, z)

def xyz2rgb(xyz):
    """Convert CIE XYZ (0-100 scale) to sRGB (0-255 per channel).

    Out-of-gamut inputs may yield values outside [0, 255].
    """
    x, y, z = xyz
    x /= 100
    y /= 100
    z /= 100

    # XYZ -> linear RGB matrix (inverse of the sRGB forward matrix).
    r = x* 3.2406 + y*-1.5372 + z*-0.4986
    g = x*-0.9689 + y* 1.8758 + z* 0.0415
    b = x* 0.0557 + y*-0.2040 + z* 1.0570

    # Forward sRGB gamma companding.
    r = 1.055 * (r**(1/2.4)) - 0.055 if r > 0.0031308 else 12.92*r
    g = 1.055 * (g**(1/2.4)) - 0.055 if g > 0.0031308 else 12.92*g
    b = 1.055 * (b**(1/2.4)) - 0.055 if b > 0.0031308 else 12.92*b

    r *= 255
    g *= 255
    b *= 255

    return (r, g, b)

def lab2rgb(lab):
    """Convert CIELAB to sRGB (0-255) via XYZ."""
    return xyz2rgb(lab2xyz(lab))

"""
Step 1: Read image and convert to CIELAB
"""

im = Image.open(INFILE)
im = im.convert("RGB")
width, height = prev_size = im.size

pixlab_map = {}

for x in range(width):
for y in range(height):
pixlab_map[(x, y)] = rgb2lab(im.getpixel((x, y)))

print("Step 1: Image read and converted")

"""
Step 2: Get feature points
"""

def euclidean(point1, point2):
return sum((x-y)**2 for x,y in zip(point1, point2))**.5

def neighbours(pixel):
    """Return the up-to-8 in-bounds neighbours of ``pixel``.

    Relies on the module-level ``width`` and ``height``.
    """
    results = []

    for dx, dy in itertools.product([-1, 0, 1], repeat=2):
        neighbour = (pixel[0] + dx, pixel[1] + dy)

        if (neighbour != pixel and 0 <= neighbour[0] < width
                and 0 <= neighbour[1] < height):
            results.append(neighbour)

    return results

def mse(colors, base):
    """Mean squared (Euclidean) deviation of ``colors`` from ``base``."""
    return sum(euclidean(x, base)**2 for x in colors)/len(colors)

features = []

# Score every pixel by how much its color differs from its neighbours'
# (a cheap edge detector).  Entries are (score, random tiebreaker, pixel);
# the random element keeps the sort stable-but-shuffled within equal scores.
for x in range(width):
    for y in range(height):
        pixel = (x, y)
        col = pixlab_map[pixel]
        features.append((mse([pixlab_map[n] for n in neighbours(pixel)], col),
                         random.random(),
                         pixel))

features.sort()
features_copy = [x[2] for x in features]

if DEBUG:
    test_im = Image.new("RGB", im.size)

    for i in range(len(features)):
        # The pixel coordinate is the tuple's third element; the source
        # read index 1 (the float tiebreaker), which putpixel cannot take.
        pixel = features[i][2]
        test_im.putpixel(pixel, (int(255*i/len(features)),)*3)

    test_im.save(FEATURE_FILE)

print("Step 2a: Edge detection-ish complete")

def random_index(list_):
    """Random index into ``list_``, exponentially biased towards the end."""
    # Rejection-sample an Exp(2) variate restricted to (0, 1].
    r = random.expovariate(2)

    while r > 1:
        r = random.expovariate(2)

    # Clamp: for tiny r, (1 - r) can round to 1.0 and yield len(list_).
    return min(int((1 - r) * len(list_)), len(list_) - 1)

sample_points = set()

# Draw up to SAMPLE_POINTS distinct pixels, biased towards the end of the
# sorted feature list (i.e. towards high-detail pixels).
while features and len(sample_points) < SAMPLE_POINTS:
    index = random_index(features)
    point = features[index][2]
    # NOTE(review): restored — without this add the loop never terminates.
    sample_points.add(point)
    del features[index]

print("Step 2b: {} feature samples generated".format(len(sample_points)))

if DEBUG:
    test_im = Image.new("RGB", im.size)

    for pixel in sample_points:
        test_im.putpixel(pixel, (255, 255, 255))

    test_im.save(SAMPLE_FILE)

"""
Step 3: Fuzzy k-means
"""

def euclidean(point1, point2):
return sum((x-y)**2 for x,y in zip(point1, point2))**.5

def get_centroid(points):
    """Coordinate-wise mean of a non-empty collection of points."""
    return tuple(sum(coord)/len(points) for coord in zip(*points))

def mean_cell_color(cell):
    """Mean LAB color of the pixels in ``cell`` (looked up via pixlab_map)."""
    return get_centroid([pixlab_map[pixel] for pixel in cell])

def median_cell_color(cell):
    """Approximate geometric median (in LAB) of a cell's pixel colors.

    Starts from the best of the mean and up to 10 random member colors,
    then hill-climbs in unit steps until the color stops moving.
    """
    # Pick start point out of mean and up to 10 pixels in cell
    mean_col = get_centroid([pixlab_map[pixel] for pixel in cell])
    start_choices = [pixlab_map[pixel] for pixel in cell]

    if len(start_choices) > 10:
        start_choices = random.sample(start_choices, 10)

    start_choices.append(mean_col)

    best_dist = None
    col = None

    for c in start_choices:
        dist = sum(euclidean(c, pixlab_map[pixel])
                   for pixel in cell)

        if col is None or dist < best_dist:
            col = c
            best_dist = dist

    # Approximate median by hill climbing.
    # NOTE(review): the source compared "< 1e-6" (one step only) and never
    # updated best_dist inside the loop; both restored to iterate until the
    # best unit move no longer changes the color.
    last = None

    while last is None or euclidean(col, last) > 1e-6:
        last = col

        best_dist = None
        best_col = None

        for deviation in itertools.product([-1, 0, 1], repeat=3):
            new_col = tuple(x+y for x,y in zip(col, deviation))
            dist = sum(euclidean(new_col, pixlab_map[pixel])
                       for pixel in cell)

            if best_dist is None or dist < best_dist:
                best_col = new_col
                best_dist = dist

        col = best_col

    return col

def random_point():
    """A detail-biased random point: a feature pixel jittered by up to ±5."""
    index = random_index(features_copy)
    point = features_copy[index]

    dx = random.random() * 10 - 5
    dy = random.random() * 10 - 5

    return (point[0] + dx, point[1] + dy)

centroids = np.asarray([random_point() for _ in range(N)])
variance = {i:float("inf") for i in range(N)}
cluster_colors = {i:(0, 0, 0) for i in range(N)}

# Initial iteration: assign every sample point to its nearest centroid.
tree = KDTree(centroids)
clusters = defaultdict(set)

for point in sample_points:
    nearest = tree.query(point)[1]
    # NOTE(review): restored — the assignment was missing from the source.
    clusters[nearest].add(point)

# Cluster!
# NOTE(review): the source lost its indentation and several assignment
# lines; the structure below is reconstructed from the algorithm
# description and should be confirmed against the original.
for iter_num in range(ITERATIONS):
    if DEBUG:
        # Visualize current cluster membership, one color per cluster.
        test_im = Image.new("RGB", im.size)

        for n, pixels in clusters.items():
            color = 0xFFFFFF * (n/N)
            color = (int(color//256//256%256), int(color//256%256), int(color%256))

            for p in pixels:
                test_im.putpixel(p, color)

        test_im.save(SAMPLE_FILE)

    # Recompute each cluster's mean color and color variance.
    for cluster_num in clusters:
        if clusters[cluster_num]:
            cols = [pixlab_map[x] for x in clusters[cluster_num]]

            cluster_colors[cluster_num] = mean_cell_color(clusters[cluster_num])
            variance[cluster_num] = mse(cols, cluster_colors[cluster_num])

        else:
            cluster_colors[cluster_num] = (0, 0, 0)
            variance[cluster_num] = float("inf")

    print("Clustering (iteration {})".format(iter_num))

    # Remove useless/high variance
    if iter_num < ITERATIONS - 1:
        delaunay = Delaunay(np.asarray(centroids))
        neighbours = defaultdict(set)

        for simplex in delaunay.simplices:
            n1, n2, n3 = simplex

            neighbours[n1] |= {n2, n3}
            neighbours[n2] |= {n1, n3}
            neighbours[n3] |= {n1, n2}

        for num, centroid in enumerate(centroids):
            col = cluster_colors[num]

            nns = set() # neighbours + neighbours of neighbours

            for n in neighbours[num]:
                nns |= {n} | neighbours[n] - {num}

            nn_far = sum(euclidean(col, cluster_colors[nn]) > CLOSE_COLOR_THRESHOLD
                         for nn in nns)

            # A centroid is redundant when fewer than 1/5 of its 2-hop
            # neighbourhood differs in color: drop its samples and resample.
            if nns and nn_far / len(nns) < 1/5:
                sample_points -= clusters[num]

                for _ in clusters[num]:
                    if features and len(sample_points) < SAMPLE_POINTS:
                        index = random_index(features)
                        # NOTE(review): the source read features[index][3]
                        # (out of range for the 3-tuples) and never added the
                        # point; both restored.
                        point = features[index][2]
                        sample_points.add(point)
                        del features[index]

                clusters[num] = set()

        # Move centroids and reassign the sample points.
        new_centroids = []

        for i in range(N):
            if clusters[i]:
                new_centroids.append(get_centroid(clusters[i]))
            else:
                new_centroids.append(random_point())

        centroids = np.asarray(new_centroids)
        tree = KDTree(centroids)

        clusters = defaultdict(set)

        for point in sample_points:
            nearest = tree.query(point, k=6)[1]
            col = pixlab_map[point]

            # "Fuzzy" assignment: give the point to the first of its 6
            # nearest centroids whose color variance accepts it, falling
            # back to the nearest centroid otherwise.
            for n in nearest:
                if n < N and euclidean(col, cluster_colors[n])**2 <= variance[n]:
                    # NOTE(review): restored assignment lost in the source.
                    clusters[n].add(point)
                    break

            else:
                clusters[nearest[0]].add(point)

print("Step 3: Fuzzy k-means complete")

"""
Step 4: Output
"""

for i in range(N):
if clusters[i]:
centroids[i] = get_centroid(clusters[i])

centroids = np.asarray(centroids)
tree = KDTree(centroids)
color_clusters = defaultdict(set)

# Throw back on some sample points to get the colors right
all_points = [(x, y) for x in range(width) for y in range(height)]

for pixel in random.sample(all_points, int(min(width*height, 5 * SAMPLE_POINTS))):
nearest = tree.query(pixel)[1]

with open(OUTFILE, "w") as outfile:
for i in range(N):
if clusters[i]:
centroid = tuple(centroids[i])
col = tuple(x/255 for x in lab2rgb(median_cell_color(color_clusters[i] or clusters[i])))
print(" ".join(map(str, centroid + col)), file=outfile)

print("Done! Time taken:", time.time() - start_time)``````

## 算法

（上面的渲染输出上有一个清晰的网格状图案。根据@randomra讲，这可能是由于有损JPG编码或imgur压缩图像所致。）

• 建立质心的Delaunay三角剖分，以便我们可以轻松地查询质心的邻居。
• 使用三角剖分可删除颜色接近其大多数邻居（> 4/5）和邻居的邻居合并的质心。还删除了所有关联的采样点，并添加了新的替换质心和采样点。此步骤试图强制算法在需要详细信息的地方放置更多的群集。
• 为新的质心构造一个kd-tree，以便我们可以轻松地查询最接近任何采样点的质心。
• 使用树将每个采样点分配给6个最接近的质心之一（根据经验选择6个质心）。如果质心的颜色在质心的颜色方差阈值内，则质心仅接受该点。我们尝试将每个采样点分配给第一个接受的质心，但是如果不可能，则只需将其分配给最接近的质心。该算法的“模糊性”来自此步骤，因为群集可能会重叠。
• 重新计算质心。

（点击查看原图）

N = 32：

N = 100：

N = 1000：

N = 3000：

1

Max

26

# Mathematica，随机细胞

``````data = ImageData@Import@file;
dims = Dimensions[data][[1 ;; 2]]
(* pick n uniformly random positions, look up the pixel color at each
   (ImageData is row-major, hence the Reverse), and emit {position, color} *)
{Reverse@#, data[[##]][[1 ;; 3]] & @@ Floor[1 + #]} &[dims #] & /@
RandomReal[1, {n, 2}]
``````

Faraz Masroor 2015年

2012rcampion 2015年

@ 2012rcampion没有理由，我只是不知道这两个功能都存在。我可能会在以后进行修复，还将示例图像更改为建议的N值。

23

# 代码

``````VoronoiImage[img_, nSeeds_, iterations_] := Module[{
i = img,
edges = EdgeDetect@img,
voronoiRegion = Transpose[{{0, 0}, ImageDimensions[img]}],
seeds, voronoiInitial, voronoiRelaxed
},
(* seed the sites at random edge pixels so cells align with image features *)
seeds = RandomChoice[ImageValuePositions[edges, White], nSeeds];
voronoiInitial = VoronoiMesh[seeds, voronoiRegion];
(* Lloyd relaxation: replace each seed with its cell centroid, `iterations` times *)
voronoiRelaxed =
Nest[VoronoiMesh[Mean @@@ MeshPrimitives[#, 2], voronoiRegion] &,
voronoiInitial, iterations];
(* fill every relaxed cell with the image color sampled at its centroid *)
Graphics[Table[{RGBColor[ImageValue[img, Mean @@ mp]], mp},
{mp,MeshPrimitives[voronoiRelaxed, 2]}]]
];
``````

neizod

13

# Python + SciPy + emcee

1. 将图像调整为较小的尺寸（约150像素）
2. 制作一个最大通道值的未经遮罩的图像（这不会太强烈地拾取白色区域）。
3. 取绝对值。
4. 选择概率与该图像成比例的随机点。这将选择不连续点的任一侧的点。
5. 优化选择的点以降低成本函数。该函数是通道中偏差平方和的最大值（再次有助于偏向纯色，而不仅限于纯白）。我将Markov Chain Monte Carlo与emcee模块（强烈推荐）滥用为优化工具。在N次链迭代后未发现新的改进时，该过程将失败。

（右键单击并查看图像以获取更大的版本）

``````#!/usr/bin/env python

import glob
import os

import scipy.misc
import scipy.spatial
import scipy.signal
import numpy as N
import numpy.random as NR
import emcee

def compute_image(pars, rimg, gimg, bimg):
    """Render the Voronoi-cell-mean image for point parameters ``pars``.

    ``pars`` holds all x coordinates followed by all y coordinates.
    Returns (r, g, b) arrays shaped like the inputs, or None when any point
    strays far outside the image (used to bound the MCMC walk).
    """
    npts = len(pars) // 2
    x = pars[:npts]
    y = pars[npts:npts*2]
    yw, xw = rimg.shape

    # exit if points are too far away from image, to stop MCMC
    # wandering off
    if(N.any(x > 1.2*xw) or N.any(x < -0.2*xw) or
       N.any(y > 1.2*yw) or N.any(y < -0.2*yw)):
        return None

    # compute tesselation: nearest site index for every pixel
    xy = N.column_stack( (x, y) )
    tree = scipy.spatial.cKDTree(xy)

    ypts, xpts = N.indices((yw, xw))
    queryxy = N.column_stack((N.ravel(xpts), N.ravel(ypts)))

    dist, idx = tree.query(queryxy)

    idx = idx.reshape(yw, xw)
    ridx = N.ravel(idx)

    # per-cell channel means via bincount (clip avoids divide-by-zero
    # for empty cells)
    div = 1./N.clip(N.bincount(ridx), 1, 1e99)
    rav = N.bincount(ridx, weights=N.ravel(rimg)) * div
    gav = N.bincount(ridx, weights=N.ravel(gimg)) * div
    bav = N.bincount(ridx, weights=N.ravel(bimg)) * div

    # broadcast each cell's mean back onto its pixels
    rout = rav[idx]
    gout = gav[idx]
    bout = bav[idx]
    return rout, gout, bout

def compute_fit(pars, img_r, img_g, img_b):
    """Return fit statistic for parameters (higher is better)."""
    # get model
    retn = compute_image(pars, img_r, img_g, img_b)
    if retn is None:
        return -1e99
    model_r, model_g, model_b = retn

    # maximum squared deviation from one of the channels
    fit = max( ((img_r-model_r)**2).sum(),
               ((img_g-model_g)**2).sum(),
               ((img_b-model_b)**2).sum() )

    # return fake log probability (emcee maximizes, so negate the error)
    return -fit

def convgauss(img, sigma):
    """Convolve image with a (size 3*sigma) Gaussian kernel."""
    # NOTE(review): the exponent divides by sigma, not sigma**2, so sigma
    # effectively acts as the variance here; kept as-is.
    size = 3*sigma
    kern = N.fromfunction(
        lambda y, x: N.exp( -((x-size/2)**2+(y-size/2)**2)/2./sigma ),
        (size, size))
    kern /= kern.sum()
    out = scipy.signal.convolve2d(img.astype(N.float64), kern, mode='same')
    return out

def process_image(infilename, outroot, npts):
img_r = img[:,:,0]
img_g = img[:,:,1]
img_b = img[:,:,2]

# scale down size
maxdim = max(img_r.shape)
scale = int(maxdim / 150)
img_r = img_r[::scale, ::scale]
img_g = img_g[::scale, ::scale]
img_b = img_b[::scale, ::scale]

# make unsharp-masked image of input
img_tot = N.max((img_r, img_g, img_b), axis=0)
img1 = convgauss(img_tot, 2)
img2 = convgauss(img_tot, 32)
diff = N.abs(img1 - img2)
diff = diff/diff.max()
diffi = (diff*255).astype(N.int)
scipy.misc.imsave(outroot + '_unsharp.png', diffi)

# create random points with a probability distribution given by
# the unsharp-masked image
yw, xw = img_r.shape
xpars = []
ypars = []
while len(xpars) < npts:
ypar = NR.randint(int(yw*0.02),int(yw*0.98))
xpar = NR.randint(int(xw*0.02),int(xw*0.98))
if diff[ypar, xpar] > NR.rand():
xpars.append(xpar)
ypars.append(ypar)

# initial parameters to model
allpar = N.concatenate( (xpars, ypars) )

# set up MCMC sampler with parameters close to each other
nwalkers = npts*5  # needs to be at least 2*number of parameters+2
pos0 = []
for i in xrange(nwalkers):
pos0.append(NR.normal(0,1,allpar.shape)+allpar)

sampler = emcee.EnsembleSampler(
nwalkers, len(allpar), compute_fit,
args=[img_r, img_g, img_b],

# sample until we don't find a better fit
lastmax = -N.inf
ct = 0
ct_nobetter = 0
for result in sampler.sample(pos0, iterations=10000, storechain=False):
print ct
pos, lnprob = result[:2]
maxidx = N.argmax(lnprob)

if lnprob[maxidx] > lastmax:
# write image
lastmax = lnprob[maxidx]
mimg = compute_image(pos[maxidx], img_r, img_g, img_b)
out = N.dstack(mimg).astype(N.int32)
out = N.clip(out, 0, 255)
scipy.misc.imsave(outroot + '_binned.png', out)

# save parameters
N.savetxt(outroot + '_param.dat', scale*pos[maxidx])

ct_nobetter = 0
print(lastmax)

ct += 1
ct_nobetter += 1
if ct_nobetter == 60:
break

def main():
for npts in 100, 300, 1000:
for infile in sorted(glob.glob(os.path.join('images', '*'))):
print infile
outroot = '%s/%s_%i' % (
'outdir',
os.path.splitext(os.path.basename(infile))[0], npts)

# race condition!
lock = outroot + '.lock'
if os.path.exists(lock):
continue
with open(lock, 'w') as f:
pass

process_image(infile, outroot, npts)

if __name__ == '__main__':
main()
``````

3

## 使用图像能量作为点权重图

1. 对于每个图像，创建一个清晰度图。清晰度图由归一化的图像能量（或图像的高频信号的平方）定义。一个示例如下所示：

1. 从图像生成许多点，锐度图中的点占70％，其他所有点占30％。这意味着从图像的高细节部分会更密集地采样点。
2. 颜色！

## 结果

`N` = 100、500、1000、3000

14

By using our site, you acknowledge that you have read and understand our Cookie Policy and Privacy Policy.
Licensed under cc by-sa 3.0 with attribution required.