#all_hardware
Generating Calibration Files
This is not for general use. Requires technical expertise.
Tools required: - An integrating sphere (we use a Spectra PT from LabSphere) - A HgAr lamp
Adjust the camera class as needed. This example uses the LucidCamera.
There are some LucidCamera-specific items in the code below that would need to be removed for other cameras.
# Standard library
import os

# Third-party
import holoviews as hv
import numpy as np
import panel as pn

# Project
from openhsi.calibrate import SettingsBuilderMixin, SpectraPTController, sum_gaussians
from openhsi.cameras import LucidCamera

# Use the bokeh backend for interactive plots; suppress the bokeh logo.
hv.extension("bokeh", logo=False)
class CalibrateCamera(SettingsBuilderMixin, LucidCamera):
    """LucidCamera augmented with the calibration-file building helpers.

    The mixin provides methods such as ``retake_flat_field``,
    ``update_row_minmax``, ``retake_HgAr``, ``fit_HgAr_lines``,
    ``update_intsphere_cube`` and ``dump`` used throughout this tutorial.
    """

    pass
# Template settings shipped with the repo; no calibration pickle exists yet
# for a fresh camera, so pkl_path is deliberately empty.
json_path_template = "../cals/cam_settings_lucid_template.json"
pkl_path = ""

# Serial/model number of the camera being calibrated.
modelno = 18
# Original printed an empty string ("".format(modelno) discards its argument);
# print the model number itself.
print("{}".format(modelno))

# Output files for the generated settings (JSON) and calibration data (pickle).
json_path_target = "../cals/OpenHSI-{0:02d}/OpenHSI-{0:02d}_settings_Mono8_bin1.json".format(
    modelno
)
pkl_path_target = "../cals/OpenHSI-{0:02d}/OpenHSI-{0:02d}_calibration_Mono8_bin1.pkl".format(
    modelno
)

# Make sure the per-camera output directory exists before any dump().
if not os.path.isdir(os.path.dirname(json_path_target)):
    os.mkdir(os.path.dirname(json_path_target))

# Controller for the LabSphere Spectra PT integrating sphere.
spt = SpectraPTController()
Find illuminated sensor area
The vertical direction/y axis of the detector array corresponds to the across-track direction of the sensor. If the image of the slit is shorter than the height, we can crop the top and bottom to save bandwidth/disk space (similar to letterboxing video).
There are two ways to do this: cropping after the fact using row_minmax, or by setting up a window on the sensor. Setting up a window will reduce the amount of data transferred from the sensor and can improve the maximum framerate depending on the sensor, so it is recommended.
1. Take a flat field
First step is to provide a uniform illumination to the slit, ideally spectrally broadband, like a halogen lamp or the sun.
# Select luminance preset 10000 on the SpectraPT integrating sphere.
spt.selectPreset(10000)

# Initialize the CalibrateCamera class with specified parameters.
with CalibrateCamera(
    json_path=json_path_template,  # camera settings template
    pkl_path="",                   # no calibration pickle yet
    processing_lvl=-1,             # raw frames, no processing pipeline
    exposure_ms=20,                # exposure time in milliseconds
) as cam:
    # Take the flat field image and display it.
    hvim_flat = cam.retake_flat_field(show=True)
    hvim_flat.opts(width=600, height=600, axiswise=True)

    # Detect the illuminated rows; edgezone=0 keeps the full illuminated band.
    hvim_row_minmax = cam.update_row_minmax(edgezone=0)
    hvim_row_minmax.opts(width=600, height=600, axiswise=True)

    # Window height rounded up to a multiple of 4 (required for LucidCameras).
    windowheight = int(
        np.ceil((cam.settings["row_slice"][1] - cam.settings["row_slice"][0]) / 4.0) * 4
    )
    print("Windowheight {}".format(windowheight))

    # Sensor window: illuminated height plus 16 rows of padding, full width.
    cam.settings["win_resolution"] = [windowheight + 16, cam.settings["resolution"][1]]
    # Window offset: row start snapped up to a multiple of 4, shifted 8 rows
    # to centre the padding; keep the existing horizontal offset.
    cam.settings["win_offset"] = [
        int(np.ceil((cam.settings["row_slice"][0]) / 4.0) * 4) - 8,
        cam.settings["win_offset"][1],
    ]
    # Region of interest inside the padded window.
    cam.settings["row_slice"] = [16, windowheight - 8]
    # Overall camera resolution now matches the window resolution.
    cam.settings["resolution"] = cam.settings["win_resolution"]

    # Save the updated camera settings to JSON and pickle files.
    cam.dump(json_path=json_path_target, pkl_path=pkl_path_target)

# Display the row min/max and flat field images side-by-side using Panel.
pn.Column(hvim_row_minmax, hvim_flat)
# Re-open the camera with the settings saved in the previous cell.
with CalibrateCamera(
    n_lines=50,
    processing_lvl=0,
    pkl_path=pkl_path_target,
    json_path=json_path_target,
    exposure_ms=10,
) as cam:
    # cam.collect()
    cam.start_cam()
    img = cam.get_img()
    img = cam.crop(img)
    cam.stop_cam()
    # cam.show(hist_eq=True)

# Check the window looks ok.
hv.Image(img, bounds=(0, 0, *img.shape)).opts(
    xlabel="wavelength index",
    ylabel="cross-track",
    cmap="gray",
    title="test frame",
    width=400,
    height=400,
)
2. Take arc and set up the wavelength scale, and get the window for 430 to 900 nm
with CalibrateCamera(
    json_path=json_path_target,  # JSON file with camera settings
    pkl_path="",                 # pickle not used here
    processing_lvl=-1,           # raw frames
) as cam:
    # Raise the gain so dim HgAr lines register - LUCIDCAMERA ONLY.
    cam.deviceSettings["Gain"].value = 10.0

    # Capture a HgAr (Mercury-Argon) spectrum (average of 18 frames) and show it.
    hvimg = cam.retake_HgAr(show=True, nframes=18)
    hvimg.opts(width=600, height=600)

    # Maximum pixel value in the captured HgAr image (check for saturation).
    print(cam.calibration["HgAr_pic"].max())

    # Fit the "smile" shifts (spectral line curvature across track)...
    smile_fit_hv = cam.update_smile_shifts()
    # ...then zero them out here (calibrate without smile correction).
    cam.calibration["smile_shifts"] = cam.calibration["smile_shifts"] * 0

    # Wavelength calibration from known HgAr emission lines.
    wavefit_hv = cam.fit_HgAr_lines(
        top_k=15,  # use the 15 brightest detected peaks
        brightest_peaks=[546.96, 435.833, (579.960 + 579.066) / 2, 763.511],  # nm
        find_peaks_height=10,  # peak-detection parameters
        prominence=1,
        width=1.5,
        interactive_peak_id=True,  # allow interactive peak selection
    )

    # Desired wavelength window in nanometres.
    waveminmax = [430, 900]
    # Indices of the closest linear-wavelength samples.
    waveminmax_ind = [
        np.argmin(np.abs(cam.calibration["wavelengths_linear"] - λ)) for λ in waveminmax
    ]

    # Window width/offset snapped to multiples of 4 with a little padding.
    window_width = int(np.ceil((waveminmax_ind[1] - waveminmax_ind[0] + 8) / 4.0) * 4)
    offset_x = int(np.floor((waveminmax_ind[0] - 4) / 4.0) * 4)
    print("Window Width {}, offset x {}".format(window_width, offset_x))

    # Update the horizontal window parameters and overall resolution.
    cam.settings["win_resolution"][1] = window_width
    cam.settings["win_offset"][1] = offset_x
    cam.settings["resolution"] = cam.settings["win_resolution"]

# Display the HgAr image, smile fit, and wavelength fit using Panel.
pn.Column(
    hvimg,
    smile_fit_hv,
    wavefit_hv.opts(xlim=(390, 1000), ylim=(-10, 255)).opts(shared_axes=False),
)

# Check the window looks ok.
pn.Column(
    hvimg.opts(shared_axes=False),
    smile_fit_hv.opts(shared_axes=False),
    wavefit_hv.opts(xlim=(400, 900), ylim=(-10, 255)).opts(shared_axes=False),
)

# Save the wavelength fit if things look ok.
cam.dump(json_path=json_path_target, pkl_path=pkl_path_target)
3. Retake flat field and arc with windows
spt.selectPreset(10000)

# Retake the flat frame with the wavelength window set.
with CalibrateCamera(
    json_path=json_path_target, pkl_path=pkl_path_target, processing_lvl=-1
) as cam:
    hvim_flat = cam.retake_flat_field(show=True)
    hvim_flat.opts(width=600, height=600, axiswise=True)

    # edgezone=8 trims a small margin around the illuminated rows.
    hvim_row_minmax = cam.update_row_minmax(edgezone=8)
    hvim_row_minmax.opts(width=600, height=600, axiswise=True)

    cam.update_resolution()
    cam.dump(json_path=json_path_target, pkl_path=pkl_path_target)

spt.turnOffLamp()

# Display and check all looks ok.
hvim_row_minmax + hvim_flat
Redo Arc with window.
# Retake arc frames and set the wavelength scale for the window.
with CalibrateCamera(
    json_path=json_path_target, pkl_path=pkl_path_target, processing_lvl=-1
) as cam:
    # Higher gain for the windowed arc capture - LUCIDCAMERA ONLY.
    cam.deviceSettings["Gain"].value = 15.0

    hvimg = cam.retake_HgAr(show=True)
    hvimg.opts(width=400, height=400)
    print(cam.calibration["HgAr_pic"].max())

    smile_fit_hv = cam.update_smile_shifts()

    wavefit_hv = cam.fit_HgAr_lines(
        top_k=12,
        brightest_peaks=[546.96, 435.833, (579.960 + 579.066) / 2, 871.66, 763.511],
        find_peaks_height=10,
        prominence=1,
        width=1.5,
        max_match_error=2,  # reject matches more than 2 nm from a known line
        interactive_peak_id=True,
        # [435.833,546.074,(579.960+579.066)/2,763.511]
    )

    cam.update_intsphere_fit()
    cam.dump(json_path=json_path_target, pkl_path=pkl_path_target)

# Display everything side by side for a visual sanity check.
(hvimg + smile_fit_hv + wavefit_hv.opts(xlim=(400, 900), ylim=(-10, 255))).opts(
    shared_axes=False
)
4. Get integrating sphere data for radiance calibration
4D datacube with coordinates of cross-track, wavelength, exposure, and luminance.
# Luminance presets to sweep.
# NOTE(review): lum_preset_dict is not defined anywhere in this file -
# presumably it comes from the SpectraPT setup; confirm before running.
luminances = np.fromiter(lum_preset_dict.keys(), dtype=int)
# luminances = np.append(luminances, 0)

# Exposure times (ms) to sweep.
exposures = [0, 5, 8, 10, 15, 20]

with CalibrateCamera(
    json_path=json_path_target, pkl_path=pkl_path_target, processing_lvl=-1
) as cam:
    # Build the radiance-reference datacube over exposure x luminance,
    # switching sphere presets via spt.selectPreset between captures.
    cam.calibration["rad_ref"] = cam.update_intsphere_cube(
        exposures, luminances, noframe=50, lum_chg_func=spt.selectPreset
    )

    # Remove saturated images: mask out any slice with more than 1000
    # saturated (== 255) pixels.
    cam.calibration["rad_ref"] = cam.calibration["rad_ref"].where(
        ~(
            np.sum((cam.calibration["rad_ref"][:, :, :, :, :] == 255), axis=(1, 2))
            > 1000
        )
    )
    cam.dump(json_path=json_path_target, pkl_path=pkl_path_target)

spt.turnOffLamp()

# Visual check of the reference cube across exposure/luminance.
cam.calibration["rad_ref"].plot(
    y="cross_track", x="wavelength_index", col="exposure", row="luminance", cmap="gray"
)

# Approximate size assuming 4 bytes per element.
print("rad_ref is {} MB".format(cam.calibration["rad_ref"].size / 1024 / 1024 * 4))

cam.update_intsphere_fit()
cam.dump(json_path=json_path_target, pkl_path=pkl_path_target)