# Generating Calibration Files
This is not for general use. Requires technical expertise.
Tools required: - An integrating sphere (we use a Spectra PT from LabSphere) - A HgAr lamp
Adjust the camera class as needed. This example uses the LucidCamera. There are some LucidCamera-specific items in the code below that would need to be removed.
import os
import holoviews as hv
import numpy as np
from openhsi.calibrate import SettingsBuilderMixin, SpectraPTController, sum_gaussians
from openhsi.cameras import LucidCamera
hv.extension("bokeh", logo=False)
import panel as pn
class CalibrateCamera(SettingsBuilderMixin, LucidCamera):
    """LucidCamera extended with calibration/settings-building helpers.

    Swap ``LucidCamera`` for another camera class as needed; the
    ``SettingsBuilderMixin`` supplies the calibration machinery
    (flat fields, HgAr fits, settings dump, etc.).
    """
# Settings template to start from; no calibration pickle exists yet.
json_path_template = "../cals/cam_settings_lucid_template.json"
cal_path = ""

# Serial/model number of the OpenHSI unit being calibrated.
modelno = 18
# Was print("".format(modelno)), which printed an empty string.
print("OpenHSI-{0:02d}".format(modelno))

# Output paths for this unit's settings JSON and calibration pickle.
json_path_target = "../cals/OpenHSI-{0:02d}/OpenHSI-{0:02d}_settings_Mono8_bin1.json".format(
    modelno
)
cal_path_target = "../cals/OpenHSI-{0:02d}/OpenHSI-{0:02d}_calibration_Mono8_bin1.pkl".format(
    modelno
)

# Create the per-unit output directory. os.mkdir fails if ../cals is
# missing; makedirs(exist_ok=True) creates all parents and is idempotent.
os.makedirs(os.path.dirname(json_path_target), exist_ok=True)
# Controller for the LabSphere Spectra PT integrating sphere.
spt = SpectraPTController()

# --- Find the illuminated sensor area ---
The vertical direction/y axis of the detector array corresponds to the across-track direction of the sensor. If the image of the slit is shorter than the height, we can crop the top and bottom to save bandwidth/disk space (similar to letterboxing video).
There are two ways to do this: cropping after the fact using row_minmax, or setting up a window on the sensor. Setting up a window will reduce the amount of data transferred from the sensor and can improve the maximum framerate depending on the sensor, so it is recommended.
1. Take a flat field
The first step is to provide uniform illumination to the slit, ideally spectrally broadband, such as a halogen lamp or the sun.
# Select luminance preset 10000 on the SpectraPT integrating sphere
spt.selectPreset(10000)
# Open the camera with the template settings and find the illuminated rows.
with CalibrateCamera(
    json_path=json_path_template,  # camera settings template JSON
    cal_path="",                   # no calibration pickle yet
    processing_lvl=-1,             # raw frames, no processing pipeline
    exposure_ms=20,                # exposure time in milliseconds
) as cam:
    # Take a flat field image and display it.
    hvim_flat = cam.retake_flat_field(show=True)
    hvim_flat.opts(width=600, height=600, axiswise=True)

    # Detect the illuminated row range (no edge exclusion) and display it.
    hvim_row_minmax = cam.update_row_minmax(edgezone=0)
    hvim_row_minmax.opts(width=600, height=600, axiswise=True)

    # Window height from the detected row slice, rounded up to a multiple
    # of 4 (required for Lucid cameras).
    windowheight = int(
        np.ceil((cam.settings["row_slice"][1] - cam.settings["row_slice"][0]) / 4.0) * 4
    )
    print("Windowheight {}".format(windowheight))

    # Window resolution: detected height plus 16 rows of padding.
    cam.settings["win_resolution"] = [windowheight + 16, cam.settings["resolution"][1]]
    # Window offset: top of the row slice rounded to a multiple of 4,
    # shifted up 8 rows so the padding straddles the illuminated area.
    cam.settings["win_offset"] = [
        int(np.ceil((cam.settings["row_slice"][0]) / 4.0) * 4) - 8,
        cam.settings["win_offset"][1],
    ]
    # Row slice (region of interest) re-expressed relative to the window.
    cam.settings["row_slice"] = [16, windowheight - 8]
    # Overall camera resolution now matches the window resolution.
    cam.settings["resolution"] = cam.settings["win_resolution"]

    # Persist the updated settings to JSON and pickle.
    cam.dump(json_path=json_path_target, cal_path=cal_path_target)

# Display the row min/max and flat field images together.
pn.Column(hvim_row_minmax, hvim_flat)
# Re-open the camera with the settings saved in the previous cell and
# grab a single cropped frame to sanity-check the sensor window.
with CalibrateCamera(
    n_lines=50,
    processing_lvl=0,
    cal_path=cal_path_target,
    json_path=json_path_target,
    exposure_ms=10,
) as cam:
    cam.start_cam()
    img = cam.get_img()
    img = cam.crop(img)
    cam.stop_cam()

# Check the window looks ok.
hv.Image(img, bounds=(0, 0, *img.shape)).opts(
    xlabel="wavelength index",
    ylabel="cross-track",
    cmap="gray",
    title="test frame",
    width=400,
    height=400,
)

# --- 2. Take arc, set up wavelength scale, and get window for 430-900 nm ---
# Capture a HgAr arc spectrum and fit the wavelength scale.
with CalibrateCamera(
    json_path=json_path_target,  # settings saved in the previous step
    cal_path="",                 # no calibration pickle needed here
    processing_lvl=-1,           # raw frames
) as cam:
    # Raise the gain so the arc lines register - LUCID CAMERA ONLY.
    cam.deviceSettings["Gain"].value = 10.0

    # Capture 18 HgAr frames, average them, and display the result.
    hvimg = cam.retake_HgAr(show=True, nframes=18)
    hvimg.opts(width=600, height=600)
    # Peak pixel value - check the arc image is not saturated.
    print(cam.calibration["HgAr_pic"].max())

    # Fit the spectral "smile" shifts...
    smile_fit_hv = cam.update_smile_shifts()
    # ...then zero them. NOTE(review): intent unclear from this file -
    # confirm whether shifts should really be discarded at this stage.
    cam.calibration["smile_shifts"] = cam.calibration["smile_shifts"] * 0

    # Wavelength calibration against known HgAr emission lines (nm).
    wavefit_hv = cam.fit_HgAr_lines(
        top_k=15,  # use the 15 brightest detected peaks
        brightest_peaks=[546.96, 435.833, (579.960 + 579.066) / 2, 763.511],
        find_peaks_height=10,  # peak-detection parameters
        prominence=1,
        width=1.5,
        interactive_peak_id=True,  # confirm peak identification interactively
    )

    # Desired wavelength window in nanometres.
    waveminmax = [430, 900]
    # Indices of the closest wavelengths on the linear wavelength axis.
    waveminmax_ind = [
        np.argmin(np.abs(cam.calibration["wavelengths_linear"] - λ)) for λ in waveminmax
    ]

    # Window width/offset in pixels, padded and rounded to multiples of 4.
    window_width = int(np.ceil((waveminmax_ind[1] - waveminmax_ind[0] + 8) / 4.0) * 4)
    offset_x = int(np.floor((waveminmax_ind[0] - 4) / 4.0) * 4)
    print("Window Width {}, offset x {}".format(window_width, offset_x))

    # Apply the new horizontal window to the camera settings.
    cam.settings["win_resolution"][1] = window_width
    cam.settings["win_offset"][1] = offset_x
    cam.settings["resolution"] = cam.settings["win_resolution"]

# Display the HgAr image, smile fit, and wavelength fit together.
pn.Column(
    hvimg,
    smile_fit_hv,
    wavefit_hv.opts(xlim=(390, 1000), ylim=(-10, 255)).opts(shared_axes=False),
)
# Check the window looks ok.
pn.Column(
    hvimg.opts(shared_axes=False),
    smile_fit_hv.opts(shared_axes=False),
    wavefit_hv.opts(xlim=(400, 900), ylim=(-10, 255)).opts(shared_axes=False),
)

# Save the wavelength fit if things look ok. NOTE: `cam` here is the
# camera object from the previous cell (its context has already exited).
cam.dump(json_path=json_path_target, cal_path=cal_path_target)

# --- 3. Retake flat field and arc with windows ---
# Turn the integrating sphere back on at the same luminance preset.
spt.selectPreset(10000)

# Retake the flat frame with the wavelength window set.
with CalibrateCamera(
    json_path=json_path_target, cal_path=cal_path_target, processing_lvl=-1
) as cam:
    hvim_flat = cam.retake_flat_field(show=True)
    hvim_flat.opts(width=600, height=600, axiswise=True)
    # Re-detect illuminated rows, excluding an 8-pixel edge zone.
    hvim_row_minmax = cam.update_row_minmax(edgezone=8)
    hvim_row_minmax.opts(width=600, height=600, axiswise=True)
    cam.update_resolution()
    cam.dump(json_path=json_path_target, cal_path=cal_path_target)

spt.turnOffLamp()

# Display and check all looks ok.
hvim_row_minmax + hvim_flat

# --- Redo the arc with the window set ---
# Retake arc frames and set the wavelength scale for the window.
with CalibrateCamera(
    json_path=json_path_target, cal_path=cal_path_target, processing_lvl=-1
) as cam:
    # Raise the gain for the arc exposure - LUCID CAMERA ONLY.
    cam.deviceSettings["Gain"].value = 15.0
    hvimg = cam.retake_HgAr(show=True)
    hvimg.opts(width=400, height=400)
    # Peak pixel value - check the arc image is not saturated.
    print(cam.calibration["HgAr_pic"].max())
    smile_fit_hv = cam.update_smile_shifts()
    # Known HgAr lines in nm; 871.66 added now that the window reaches 900 nm.
    wavefit_hv = cam.fit_HgAr_lines(
        top_k=12,
        brightest_peaks=[546.96, 435.833, (579.960 + 579.066) / 2, 871.66, 763.511],
        find_peaks_height=10,
        prominence=1,
        width=1.5,
        max_match_error=2,
        interactive_peak_id=True,
    )
    cam.update_intsphere_fit()
    cam.dump(json_path=json_path_target, cal_path=cal_path_target)

# Display the arc image, smile fit, and wavelength fit together.
(hvimg + smile_fit_hv + wavefit_hv.opts(xlim=(400, 900), ylim=(-10, 255))).opts(
    shared_axes=False
)

# --- 3. Get integrating sphere data for radiance calibration ---
This builds a 4D datacube with coordinates of cross-track, wavelength, exposure, and luminance.
# Luminance presets to sample. NOTE(review): `lum_preset_dict` is not
# defined in this file - confirm it is provided by the calibration module.
luminances = np.fromiter(lum_preset_dict.keys(), dtype=int)
# Exposure times in milliseconds.
exposures = [0, 5, 8, 10, 15, 20]

with CalibrateCamera(
    json_path=json_path_target, cal_path=cal_path_target, processing_lvl=-1
) as cam:
    # Reference datacube over exposure and luminance; the sphere preset is
    # changed between captures via spt.selectPreset.
    cam.calibration["rad_ref"] = cam.update_intsphere_cube(
        exposures, luminances, noframe=50, lum_chg_func=spt.selectPreset
    )
    # Mask out saturated captures: drop any image with more than 1000
    # pixels at the 8-bit ceiling (255).
    cam.calibration["rad_ref"] = cam.calibration["rad_ref"].where(
        ~(
            np.sum((cam.calibration["rad_ref"][:, :, :, :, :] == 255), axis=(1, 2))
            > 1000
        )
    )
    cam.dump(json_path=json_path_target, cal_path=cal_path_target)

spt.turnOffLamp()

# Inspect the reference cube across exposures and luminances.
cam.calibration["rad_ref"].plot(
    y="cross_track", x="wavelength_index", col="exposure", row="luminance", cmap="gray"
)

# Report cube size in MB, assuming 4 bytes per value.
print("rad_ref is {} MB".format(cam.calibration["rad_ref"].size / 1024 / 1024 * 4))

# Fit the radiance calibration from the cube and save everything.
cam.update_intsphere_fit()
cam.dump(json_path=json_path_target, cal_path=cal_path_target)