Introduce a (barely) working InflationGrader
Added Distributor classes that calculate the best cell distributions and an Approximator class that decides which Chops to choose to meet them;
rewrote SmoothGrader and based the whole InflationGrader on those.
Fixed a bug in grading relations (border case with count=1)

To do:
- handle cases where both ends of a Wire are on walls;
- write tests!
- refactor: organize the autograding sources, their hierarchy, and the respective tests
- get rid of repeated code (LayerStack in Params and Distributor)
- add direct imports
- test/use graders with some serious examples
FranzBangar committed Dec 29, 2024
1 parent 022c19a commit 57577b1
Showing 11 changed files with 385 additions and 117 deletions.
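
For orientation before the diffs: a Chop (constructed throughout the new code below) describes one segment of a block edge by its length ratio, cell count and total expansion, which is what blockMesh ultimately consumes; the Approximator's job is to reduce an arbitrary list of optimized cell sizes to a handful of such segments. A minimal, hypothetical two-segment grading (refined near-wall band plus uniform bulk) could look like this (a sketch only, using the same Chop keyword arguments as the diffs below):

from classy_blocks.grading.chop import Chop

# hypothetical grading of one edge: 20 % of its length holds 10 cells that
# expand 5x in total (near-wall refinement), the remaining 80 % holds 15
# uniform cells
chops = [
    Chop(length_ratio=0.2, count=10, total_expansion=5),
    Chop(length_ratio=0.8, count=15, total_expansion=1),
]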
13 changes: 10 additions & 3 deletions examples/advanced/inflation_grader.py
@@ -12,13 +12,20 @@
shape = cb.ExtrudedShape(base, 1)

# turn one block around to test grader's skillz
shape.grid[1][0].rotate(np.pi, [0, 0, 1])
shape.grid[0][1].rotate(np.pi, [0, 0, 1])

mesh.add(shape)

mesh.set_default_patch("boundary", "patch")
for i in (0, 1, 2):
for i in (0, 2):
shape.operations[i].set_patch("front", "walls")
shape.operations[1].set_patch("back", "walls")

for i in (3, 4, 5):
shape.operations[i].set_patch("back", "walls")

for i in (0, 3):
shape.operations[i].set_patch("left", "walls")

mesh.modify_patch("walls", "wall")

@@ -32,7 +39,7 @@
vertex = list(finder.find_in_sphere(point))[0]
vertex.translate([0, 0.8, 0])

grader = InflationGrader(mesh, 1e-2, 0.1)
grader = InflationGrader(mesh, 5e-3, 0.1)
grader.grade(take="max")

mesh.write(os.path.join("..", "case", "system", "blockMeshDict"), debug_path="debug.vtk")
145 changes: 145 additions & 0 deletions src/classy_blocks/grading/autograding/params/approximator.py
@@ -0,0 +1,145 @@
import dataclasses
import itertools
from typing import List, Sequence

import numpy as np

from classy_blocks.grading.chop import Chop
from classy_blocks.types import FloatListType
from classy_blocks.util.constants import TOL


@dataclasses.dataclass
class Piece:
coords: FloatListType

@property
def point_count(self) -> int:
return len(self.coords)

@property
def cell_count(self) -> int:
return self.point_count - 1

@property
def start_size(self) -> float:
return abs(self.coords[1] - self.coords[0])

@property
def end_size(self) -> float:
return abs(self.coords[-1] - self.coords[-2])

@property
def length(self) -> float:
return abs(self.coords[-1] - self.coords[0])

@property
def total_expansion(self) -> float:
return self.end_size / self.start_size

@property
def is_simple(self) -> bool:
"""If all sizes are equal, this is a simple grading case"""
return abs(self.start_size - self.end_size) < TOL

def get_chop_coords(self) -> FloatListType:
"""Calculates coordinates as will be produced by a Chop"""
if self.is_simple:
return np.linspace(self.coords[0], self.coords[-1], num=self.point_count)

sizes = np.geomspace(self.start_size, self.end_size, num=self.cell_count)
coords = np.ones(self.point_count) * self.coords[0]

add = np.cumsum(sizes)

if self.coords[-1] < self.coords[0]:
add *= -1

coords[1:] += add

return coords

def get_fitness(self) -> float:
differences = (self.coords - self.get_chop_coords()) ** 2
return float(np.sum(differences) / self.cell_count)

def get_chop(self) -> Chop:
return Chop(
length_ratio=self.length,
count=self.cell_count,
total_expansion=self.total_expansion,
)


class Approximator:
"""Takes a list of arbitrary cell sizes and creates Chop objects
that blockMesh will understand; Tries to minimize differences
between the rudimentary total expansion + count and actual
desired (optimized) cell sizes.
In a possible future scenario where a custom mesher would be employed,
actual cell sizes could be used without intervention of this object."""

def __init__(self, coords: FloatListType):
self.coords = coords
self.sizes = np.diff(self.coords)
self.ratios = np.roll(self.sizes, -1)[:-1] / self.sizes[:-1]
self.count = len(coords) - 1

@property
def length(self) -> float:
return abs(self.coords[-1] - self.coords[0])

@property
def is_simple(self) -> bool:
"""Returns True if this is a simple one-chop equal-size scenario"""
return max(self.sizes) - min(self.sizes) < TOL

def get_pieces(self, indexes: List[int]) -> List[Piece]:
"""Creates Piece objects between given indexes (adds 0 and -1 automatically);
does not check if count is smaller than length of indexes"""
indexes = [0, *indexes]

pieces = [Piece(self.coords[indexes[i] : indexes[i + 1] + 1]) for i in range(len(indexes) - 1)]
pieces.append(Piece(self.coords[indexes[-1] :]))

return pieces

def get_chops(self, number: int) -> List[Chop]:
if self.is_simple:
# don't bother
return [Chop(count=self.count)]

# limit the number of chops so there are no zero-cell chops
number = min(number + 1, self.count - 1)

# brute force: choose the best combination of chops
# but don't overdo it; just try a couple of scenarios
refinement = 4
indexes = np.linspace(0, self.count + 1, num=number * refinement, dtype="int")[1:-1]

best_fit = 1e12
best_scenario: Sequence[int] = [0] * (number - 1)

combinations = itertools.combinations(indexes, r=number - 2)
for scenario in combinations:
try:
pieces = self.get_pieces(scenario) # type:ignore
fit = sum(piece.get_fitness() for piece in pieces)
if fit < best_fit:
best_fit = fit
best_scenario = scenario
except (IndexError, ValueError):
# obviously not the best scenario, eh?
continue

pieces = self.get_pieces(best_scenario) # type:ignore

chops = [piece.get_chop() for piece in pieces]

# normalize length ratios
length = self.length
for chop in chops:
chop.length_ratio = chop.length_ratio / length

return chops
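
A minimal usage sketch of the new Approximator (hypothetical numbers; it assumes only this new module and that Chop exposes the fields it is constructed with above):

import numpy as np

from classy_blocks.grading.autograding.params.approximator import Approximator

# hypothetical cell sizes along one wire: 10 cells growing from 0.01 to 0.05,
# followed by 10 uniform bulk cells of size 0.05
sizes = np.concatenate((np.geomspace(0.01, 0.05, num=10), np.full(10, 0.05)))
coords = np.concatenate(([0.0], np.cumsum(sizes)))

approximator = Approximator(coords)
chops = approximator.get_chops(2)  # ask for roughly two segments

for chop in chops:
    print(chop.length_ratio, chop.count, chop.total_expansion)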
99 changes: 36 additions & 63 deletions src/classy_blocks/grading/autograding/params/distributor.py
@@ -6,7 +6,7 @@
import scipy.interpolate
import scipy.optimize

from classy_blocks.grading.autograding.params.base import sum_length
from classy_blocks.grading.autograding.params.approximator import Approximator
from classy_blocks.grading.autograding.params.layers import InflationLayer
from classy_blocks.grading.chop import Chop
from classy_blocks.types import FloatListType
@@ -17,7 +17,7 @@
class DistributorBase(abc.ABC):
"""Algorithm that creates chops from given count and sizes;
first distributes cells using predefined 'ideal' sizes and ratios,
then optimizes their individual size to get as close to that ideal as possible.
then optimizes their *individual* size to get as close to that ideal as possible.
Then, when cells are placed, Chops are created so that they produce as similar
cells to the calculated as possible. Since blockMesh only supports equal
@@ -32,10 +32,10 @@ class DistributorBase(abc.ABC):

@staticmethod
def get_actual_ratios(coords: FloatListType) -> FloatListType:
# TODO: repeated code within Approximator! un-repeat
lengths = np.diff(coords)
return (lengths[:-1] / np.roll(lengths, -1)[:-1]) ** -1
return np.roll(lengths, -1)[:-1] / lengths[:-1]

@abc.abstractmethod
def get_ideal_ratios(self) -> FloatListType:
"""Returns desired cell-to-cell ratios"""
return np.ones(self.count + 1)
@@ -45,7 +45,9 @@ def get_ratio_weights(self) -> FloatListType:
"""Returns weights of cell ratios"""

def get_raw_coords(self) -> FloatListType:
# 'count' denotes number of 'intervals' so there must be another point
# 'count' is the number of intervals, so one more point is needed;
# first and last cells are added artificially ('ghost cells') to calculate
# proper expansion ratios and sizes
return np.concatenate(
([-self.size_before], np.linspace(0, self.length, num=self.count + 1), [self.length + self.size_after])
)
@@ -60,69 +62,31 @@ def get_smooth_coords(self) -> FloatListType:
def ratios(inner_coords):
coords[2:-2] = inner_coords

difference = self.get_actual_ratios(coords) - self.get_ideal_ratios()
return difference * self.get_ratio_weights()
# to prevent 'flipping' over and producing zero or negative length, scale with e^-ratio
difference = -(self.get_ratio_weights() * (self.get_actual_ratios(coords) - self.get_ideal_ratios()))
return np.exp(difference) - 1

scale = min(self.size_before, self.size_after, self.length / self.count) / 10
_ = scipy.optimize.least_squares(ratios, coords[2:-2], method="lm", ftol=scale / 100, x_scale=scale)
scale = min(self.size_before, self.size_after, self.length / self.count) / 100
_ = scipy.optimize.least_squares(ratios, coords[2:-2], ftol=scale / 10, x_scale=scale)

return coords
# omit the 'ghost' cells
return coords[1:-1]

@property
def is_simple(self) -> bool:
# Don't overdo basic, simple-graded blocks
base_size = self.length / self.count

# TODO: use a more relaxed criterion?
return base_size - self.size_before < TOL and base_size - self.size_after < TOL
return base_size - self.size_before < TOL and base_size - self.size_after < 10 * TOL

def get_chops(self, pieces: int) -> List[Chop]:
if self.is_simple:
return [Chop(count=self.count)]
approximator = Approximator(self.get_smooth_coords())
return approximator.get_chops(pieces)

def get_last_size(self) -> float:
coords = self.get_smooth_coords()
sizes = np.diff(coords)
ratios = self.get_actual_ratios(coords)

count = len(ratios) - 1

# create a piecewise linear function from a number of chosen indexes and their respective c2c ratios
# then optimize indexes to obtain best fit
# fratios = scipy.interpolate.interp1d(range(len(ratios)), ratios)

# def get_piecewise(indexes: List[int]) -> Callable:
# values = np.take(ratios, indexes)
# return scipy.interpolate.interp1d(indexes, values)

# def get_fitness(indexes: List[int]) -> float:
# fitted = get_piecewise(indexes)(range(len(ratios)))

# ss_tot = np.sum((ratios - np.mean(ratios)) ** 2)
# ss_res = np.sum((ratios - fitted) ** 2)

# return 1 - (ss_res / ss_tot)

# print(get_fitness([0, 9, 10, count]))
# print(get_fitness(np.linspace(0, count, num=pieces + 1, dtype=int)))

chops: List[Chop] = []
indexes = np.linspace(0, count, num=pieces + 1, dtype=int)

for i, index in enumerate(indexes[:-1]):
chop_ratios = ratios[index : indexes[i + 1]]
ratio = np.prod(chop_ratios)
chop_count = indexes[i + 1] - indexes[i]
avg_ratio = ratio ** (1 / chop_count)
length = sum_length(sizes[index], chop_count, avg_ratio)
chop = Chop(length_ratio=length, total_expansion=ratio, count=chop_count)
chops.append(chop)

# normalize length ratios
sum_ratio = sum([chop.length_ratio for chop in chops])
for chop in chops:
chop.length_ratio = chop.length_ratio / sum_ratio

return chops
return coords[-2] - coords[-3]


@dataclasses.dataclass
@@ -132,8 +96,9 @@ def get_ideal_ratios(self):
return super().get_ideal_ratios()

def get_ratio_weights(self):
# Enforce stricter policy on size_before and size_after
weights = np.ones(self.count + 1)
# Enforce stricter policy on the first few cells
# to match size_before and size_after
for i in (0, 1, 2, 3):
w = 2 ** (3 - i)
weights[i] = w
@@ -146,26 +111,34 @@ def get_ratio_weights(self):
class InflationDistributor(SmoothDistributor):
c2c_expansion: float
bl_thickness_factor: int
buffer_expansion: float
bulk_size: float

@property
def is_simple(self) -> bool:
return False

def get_ideal_ratios(self):
# TODO: combine this logic and LayerStack;
# possibly package all parameters into a separate dataclass
ratios = super().get_ideal_ratios()

# Ideal growth ratio in boundary layer is user-specified c2c_expansion;
inflation_layer = InflationLayer(self.size_before, self.c2c_expansion, self.bl_thickness_factor, 1e12)
inflation_count = inflation_layer.count
print(f"Inflation count: {inflation_count}")

ratios = super().get_ideal_ratios()

ratios[:inflation_count] = self.c2c_expansion
print(ratios)

# add a buffer layer if needed
last_inflation_size = inflation_layer.end_size
if self.bulk_size > self.buffer_expansion * last_inflation_size:
buffer_count = int(np.log(self.bulk_size / last_inflation_size) / np.log(self.buffer_expansion)) + 1
ratios[inflation_count : inflation_count + buffer_count] = self.buffer_expansion

return ratios

def get_ratio_weights(self):
return super().get_ratio_weights()

def _get_ratio_weights(self):
# using the same weights as in SmoothDistributor
# can trigger overflow warnings but doesn't produce
# better chops; thus, keep it simple
return np.ones(self.count + 1)
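
A small worked example of the buffer-layer count used in InflationDistributor.get_ideal_ratios above (hypothetical numbers, standalone apart from numpy): growing from a last inflation cell of 0.01 towards a bulk size of 0.05 at a buffer expansion of 2 yields three buffer cells (0.02, 0.04, 0.08) before uniform bulk cells take over.

import numpy as np

# hypothetical values
last_inflation_size = 0.01  # thickness of the last cell in the inflation layer
bulk_size = 0.05            # desired bulk cell size
buffer_expansion = 2.0      # cell-to-cell growth within the buffer layer

# same formula as in InflationDistributor.get_ideal_ratios: number of cells
# needed to grow from the last inflation cell to (at least) the bulk size
buffer_count = int(np.log(bulk_size / last_inflation_size) / np.log(buffer_expansion)) + 1
print(buffer_count)  # 3 -> buffer cell sizes 0.02, 0.04, 0.08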