Merge pull request #188 from outofculture/166_surfs_up
Surface detection and faster z-stacks
outofculture committed Jan 25, 2024
2 parents f1c7f20 + feb5af0 commit fa5dadc
Showing 6 changed files with 44 additions and 51 deletions.
68 changes: 35 additions & 33 deletions acq4/devices/Camera/Camera.py
@@ -6,7 +6,7 @@
import time

from typing import Callable, Optional
from contextlib import contextmanager
from contextlib import contextmanager, ExitStack
from six.moves import range

import pyqtgraph as pg
@@ -256,7 +256,7 @@ def stop(self, block=True):
self.acqThread.stop(block=block)

@contextmanager
def run(self, ensureFreshFrames=False):
def ensureRunning(self, ensureFreshFrames=False):
"""Context manager for starting and stopping camera acquisition thread. If used
with non-blocking frame acquisition, this will still exit the context before
the frames are necessarily acquired.
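As a usage sketch only (the camera variable and the frame count are illustrative, not part of this change): the manager starts the acquisition thread if it is not already running and, per the code below, stops it on exit only in that case.

# illustrative sketch, not from this commit
with camera.ensureRunning(ensureFreshFrames=True):
    frames = camera.acquireFrames(5).getResult()  # blocks until five frames have been collected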
@@ -279,7 +279,7 @@ def run(self, ensureFreshFrames=False):
if not running:
self.stop()

def acquireFrames(self, n=None) -> "FrameAcquisitionFuture":
def acquireFrames(self, n=None, ensureFreshFrames=False) -> "FrameAcquisitionFuture":
"""Acquire a specific number of frames and return a FrameAcquisitionFuture.
If *n* is None, then frames will be acquired until future.stop() is called.
@@ -288,7 +288,9 @@ def acquireFrames(self, n=None) -> "FrameAcquisitionFuture":
This method works by collecting frames as they stream from the camera and does not
handle starting / stopping / configuring the camera.
"""
return FrameAcquisitionFuture(self, n)
if n is None and ensureFreshFrames:
raise ValueError("ensureFreshFrames=True is not compatible with n=None")
return FrameAcquisitionFuture(self, n, ensureFreshFrames=ensureFreshFrames)

@Future.wrap
def driverSupportedFixedFrameAcquisition(self, n: int = 1, _future: Future = None) -> list["Frame"]:
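A sketch of the call patterns this signature now allows (the camera variable is assumed; the comments restate the docstring and the check above rather than tested behaviour):

fresh = camera.acquireFrames(1, ensureFreshFrames=True).getResult()[0]  # bounded request for newly acquired frames
stream = camera.acquireFrames()  # unbounded; frames collect until stream.stop() is called
camera.acquireFrames(None, ensureFreshFrames=True)  # rejected with ValueError by the new check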
@@ -329,10 +331,7 @@ def quit(self):
def getEstimatedFrameRate(self, _future: Future):
"""Return the estimated frame rate of the camera.
"""
if not self.isRunning():
with self.run():
return _future.waitFor(self.acqThread.getEstimatedFrameRate()).getResult()
else:
with self.ensureRunning():
return _future.waitFor(self.acqThread.getEstimatedFrameRate()).getResult()

# @ftrace
@@ -976,11 +975,12 @@ def reset(self):


class FrameAcquisitionFuture(Future):
def __init__(self, camera: Camera, frameCount: Optional[int], timeout: float = 10):
def __init__(self, camera: Camera, frameCount: Optional[int], timeout: float = 10, ensureFreshFrames: bool = False):
"""Acquire a frames asynchronously, either a fixed number or continuously until stopped."""
super().__init__()
self._camera = camera
self._frame_count = frameCount
self._ensure_fresh_frames = ensureFreshFrames
self._stop_when = None
self._frames = []
self._timeout = timeout
@@ -990,32 +990,34 @@ def __init__(self, camera: Camera, frameCount: Optional[int], timeout: float = 1

def _monitorAcquisition(self):
self._camera.acqThread.connectCallback(self.handleNewFrame)
try:
lastFrameTime = ptime.time()
while True:
if self.isDone():
break
try:
frame = self._queue.get_nowait()
lastFrameTime = ptime.time()
except queue.Empty:
with ExitStack() as stack:
if self._ensure_fresh_frames:
stack.enter_context(self._camera.ensureRunning(ensureFreshFrames=True))
try:
lastFrameTime = ptime.time()
while True:
if self.isDone():
break
try:
self.checkStop(0.1) # delay while checking for a stop request
except self.StopRequested:
self._taskDone(interrupted=self._frame_count is not None)
frame = self._queue.get_nowait()
lastFrameTime = ptime.time()
except queue.Empty:
try:
self.checkStop(0.1) # delay while checking for a stop request
except self.StopRequested:
self._taskDone(interrupted=self._frame_count is not None)
break
if ptime.time() - lastFrameTime > self._timeout:
self._taskDone(interrupted=True, error=TimeoutError("Timed out waiting for frames"))
continue
self._frames.append(frame)
if self._stop_when is not None and self._stop_when(frame):
self._taskDone()
break
if ptime.time() - lastFrameTime > self._timeout:
self._taskDone(interrupted=True, error=TimeoutError("Timed out waiting for frames"))
continue
self._frames.append(frame)
if self._stop_when is not None and self._stop_when(frame):
self._taskDone()
break
if self._frame_count is not None and len(self._frames) >= self._frame_count:
self._taskDone()

finally:
self._camera.acqThread.disconnectCallback(self.handleNewFrame)
if self._frame_count is not None and len(self._frames) >= self._frame_count:
self._taskDone()
finally:
self._camera.acqThread.disconnectCallback(self.handleNewFrame)

def handleNewFrame(self, frame):
self._queue.put(frame)
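The ExitStack used in _monitorAcquisition above is the standard-library way to enter a context manager only when a condition holds while still guaranteeing cleanup; a self-contained sketch of the pattern with made-up names:

from contextlib import ExitStack, contextmanager

@contextmanager
def running(name):
    print("start", name)
    try:
        yield
    finally:
        print("stop", name)

def monitor(ensure_fresh=False):
    with ExitStack() as stack:
        if ensure_fresh:
            # entered only on demand; ExitStack still closes it when the with-block exits
            stack.enter_context(running("camera"))
        print("collect frames")

monitor()                   # prints: collect frames
monitor(ensure_fresh=True)  # prints: start camera, collect frames, stop camera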
6 changes: 0 additions & 6 deletions acq4/devices/Camera/CameraInterface.py
@@ -345,13 +345,7 @@ def boundingRect(self):
"""
return self.cam.getBoundary().boundingRect()

def takeImage(self, closeShutter=None):
# TODO this is maybe unused
# closeShutter is used for laser scanning devices; we can ignore it here.
with self.getDevice().run():
return self.getDevice().acquireFrames(1).getResult()[0]


class CameraItemGroup(DeviceTreeItemGroup):
def __init__(self, camera, includeSubdevices=True):
DeviceTreeItemGroup.__init__(self, device=camera, includeSubdevices=includeSubdevices)
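With takeImage removed, a one-shot capture is presumably written directly against the camera device, as the Scanner change later in this diff does; a sketch with an assumed camera instance:

frame = camera.acquireFrames(1, ensureFreshFrames=True).getResult()[0]
image = frame.data()  # the image array, as used elsewhere in this commit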
2 changes: 1 addition & 1 deletion acq4/devices/Pipette/pipette.py
@@ -434,7 +434,7 @@ def _solveGlobalStagePosition(self, pos):
spos = np.asarray(stage.globalPosition())
return spos + dif

def moveToLocal(self, pos, speed, linear=False):
def _moveToLocal(self, pos, speed, linear=False):
"""Move the electrode tip directly to the given position in local coordinates.
WARNING: This method does _not_ implement any motion planning.
"""
6 changes: 3 additions & 3 deletions acq4/devices/Pipette/tracker.py
@@ -193,9 +193,9 @@ def takeReferenceFrames(
if zStep is None:
zStep = 1e-6
frames = _future.waitFor(runZStack(imager, (zStart, zEnd, zStep))).getResult()
_future.waitFor(self.dev.moveToLocal([-tipLength * 3, 0, 0], "slow"))
_future.waitFor(self.dev._moveToLocal([-tipLength * 3, 0, 0], "slow"))
bg_frames = _future.waitFor(runZStack(imager, (zStart, zEnd, zStep))).getResult()
_future.waitFor(self.dev.moveToLocal([tipLength * 3, 0, 0], "slow"))
_future.waitFor(self.dev._moveToLocal([tipLength * 3, 0, 0], "slow"))
key = imager.getDeviceStateKey()
maxInd = np.argmax([imageTemplateMatch(f.data(), center)[1] for f in frames])
self.reference[key] = {
@@ -240,7 +240,7 @@ def measureTipPosition(
# move pipette and take a background frame
if pos is None:
pos = self.dev.globalPosition()
self.dev.moveToLocal([-tipLength * 3, 0, 0], "fast").wait()
self.dev._moveToLocal([-tipLength * 3, 0, 0], "fast").wait()
bg_frame = self.takeFrame()
else:
bg_frame = None
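Condensed, the background-frame pattern used in this file (names as in the diff; moving the tip back afterwards mirrors takeReferenceFrames above and is assumed here):

self.dev._moveToLocal([-tipLength * 3, 0, 0], "fast").wait()  # slide the tip well out of the field of view
bg_frame = self.takeFrame()                                   # image the scene without the pipette
self.dev._moveToLocal([tipLength * 3, 0, 0], "fast").wait()   # return the tip to its original position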
6 changes: 2 additions & 4 deletions acq4/devices/Scanner/DeviceGui.py
@@ -365,8 +365,7 @@ def sample(self):
xRange = (self.ui.xMinSpin.value(), self.ui.xMaxSpin.value())
yRange = (self.ui.yMinSpin.value(), self.ui.yMaxSpin.value())

with camera.run(ensureFreshFrames=True):
background = camera.acquireFrames(1).getResult()[0]
background = camera.acquireFrames(1, ensureFreshFrames=True).getResult()[0]

laser.setAlignmentMode()
try:
@@ -382,8 +381,7 @@
y = yRange[0] + dy * j
positions.append([x, y])
self.dev.setCommand([x, y])
with camera.run(ensureFreshFrames=True):
images.append(camera.acquireFrames(1)[0])
images.append(camera.acquireFrames(1, ensureFreshFrames=True).getResult()[0])
finally:
laser.closeShutter()
return background.data(), images, positions
7 changes: 3 additions & 4 deletions acq4/util/imaging/sequencer.py
@@ -155,7 +155,7 @@ def _slow_z_stack(imager, start, end, step, _future) -> list["Frame"]:
step = sign * abs(step)
frames_fut = imager.acquireFrames()
_set_focus_depth(imager, start, direction, speed='fast', future=_future)
with imager.run(ensureFreshFrames=True):
with imager.ensureRunning(ensureFreshFrames=True):
for z in np.arange(start, end + step, step):
_future.waitFor(imager.acquireFrames(1))
_set_focus_depth(imager, z, direction, speed='slow', future=_future)
@@ -231,7 +231,7 @@ def runSequence(self):
meters_per_frame = abs(step)
speed = meters_per_frame * z_per_second * 0.5
future = imager.acquireFrames()
with imager.run(ensureFreshFrames=True):
with imager.ensureRunning(ensureFreshFrames=True):
imager.acquireFrames(1).wait() # just to be sure the camera's recording
_set_focus_depth(imager, end, direction, speed)
imager.acquireFrames(1).wait() # just to be sure the camera caught up
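Read as a whole, the fast z-stack above streams frames while the focus drive makes one constant-speed move; the sketch below restates that flow with names from the diff, and the closing stop()/getResult() calls are an assumption about code outside this hunk. If z_per_second is the camera's estimated frame rate (also an assumption), a 1 µm step at roughly 30 frames per second gives a focus speed of about 15 µm/s, i.e. roughly two frames per depth step thanks to the 0.5 factor.

frames_future = imager.acquireFrames()               # unbounded: frames accumulate during the move
with imager.ensureRunning(ensureFreshFrames=True):
    imager.acquireFrames(1).wait()                   # confirm the camera is actually recording
    _set_focus_depth(imager, end, direction, speed)  # one continuous move from the start depth to the end depth
    imager.acquireFrames(1).wait()                   # confirm the camera has caught up after the move
frames_future.stop()                                 # assumed: end the unbounded acquisition
frames = frames_future.getResult()                   # pairing frames with depths happens downstream, not in this hunk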
@@ -244,8 +244,7 @@ def runSequence(self):
self._frames = _slow_z_stack(imager, start, end, step).getResult()
self._frames = _enforce_linear_z_stack(self._frames, step)
else: # single frame
with imager.run(ensureFreshFrames=True):
self._frames.append(imager.acquireFrames(1).getResult()[0])
self._frames.append(imager.acquireFrames(1, ensureFreshFrames=True).getResult()[0])
self.sendStatusMessage(i, maxIter)
self.sleep(until=start + interval)
finally:
