Mirror of https://github.com/projecthorus/wenet
More autofocus options, add lens position to downlink telemetry
parent
72535848b6
commit
f25101005c
@@ -36,7 +36,7 @@ class WENET_PACKET_TYPES:
 
 
 class WENET_PACKET_LENGTHS:
-    GPS_TELEMETRY = 61
+    GPS_TELEMETRY = 65
     ORIENTATION_TELEMETRY = 43
     IMAGE_TELEMETRY = 80
 
@@ -208,7 +208,7 @@ def gps_telemetry_decoder(packet):
     # Wrap the next bit in exception handling.
     try:
         # Unpack the packet into a list.
-        data = struct.unpack(">BHIBffffffBBBffHffff", packet)
+        data = struct.unpack(">BHIBffffffBBBffHfffff", packet)
 
         gps_data['week'] = data[1]
         gps_data['iTOW'] = data[2]/1000.0 # iTOW provided as milliseconds, convert to seconds.
 
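A quick sanity check on the two format strings above (not part of the commit): the single extra trailing float is exactly what moves WENET_PACKET_LENGTHS.GPS_TELEMETRY from 61 to 65 bytes.

    import struct

    # '>' (big-endian) formats have no alignment padding, so calcsize is just the sum of the field sizes.
    print(struct.calcsize(">BHIBffffffBBBffHffff"))    # 61 - old GPS telemetry packet
    print(struct.calcsize(">BHIBffffffBBBffHfffff"))   # 65 - new packet, one extra float for the lens position
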
@@ -230,6 +230,7 @@ def gps_telemetry_decoder(packet):
         gps_data['load_avg_5'] = round(data[17],3)
         gps_data['load_avg_15'] = round(data[18],3)
         gps_data['disk_percent'] = round(data[19],3)
+        gps_data['lens_position'] = round(data[20],4)
         # Check to see if we actually have real data in these new fields.
         # If its an old transmitter, it will have 0x55 in these spots, which we can detect
         if gps_data['cpu_speed'] == 21845:
 
@@ -242,6 +243,7 @@ def gps_telemetry_decoder(packet):
             gps_data['load_avg_5'] = 0
             gps_data['load_avg_15'] = 0
             gps_data['disk_percent'] = -1.0
+            gps_data['lens_position'] = -999.0
 
 
     # Perform some post-processing on the data, to make some of the fields easier to read.
 
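Why the magic number 21845: older transmitters pad the unused tail of the packet with 0x55 bytes, so the cpu_speed field (carried as an unsigned 16-bit value) reads back as 0x5555 when the newer fields are absent. A minimal illustration:

    import struct

    # Two 0x55 padding bytes interpreted as an unsigned 16-bit integer.
    (cpu_speed,) = struct.unpack(">H", b"\x55\x55")
    print(cpu_speed, hex(cpu_speed))   # 21845 0x5555 -> treat the newer fields (including lens_position) as absent
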
@@ -303,7 +305,7 @@ def gps_telemetry_string(packet):
     if gps_data['error'] != 'None':
         return "GPS: ERROR Could not decode."
     else:
-        gps_data_string = "GPS: %s Lat/Lon: %.5f,%.5f Alt: %dm, Speed: H %dkph V %.1fm/s, Heading: %d deg, Fix: %s, SVs: %d, DynModel: %s, Radio Temp: %.1f, CPU Temp: %.1f, CPU Speed: %d, Load Avg: %.2f, %.2f, %.2f, Disk Usage: %.1f%%" % (
+        gps_data_string = "GPS: %s Lat/Lon: %.5f,%.5f Alt: %dm, Speed: H %dkph V %.1fm/s, Heading: %d deg, Fix: %s, SVs: %d, DynModel: %s, Radio Temp: %.1f, CPU Temp: %.1f, CPU Speed: %d, Load Avg: %.2f, %.2f, %.2f, Disk Usage: %.1f%%, Lens Pos: %.4f" % (
             gps_data['timestamp'],
             gps_data['latitude'],
             gps_data['longitude'],
 
@@ -320,7 +322,8 @@ def gps_telemetry_string(packet):
             gps_data['load_avg_1'],
             gps_data['load_avg_5'],
             gps_data['load_avg_15'],
-            gps_data['disk_percent']
+            gps_data['disk_percent'],
+            gps_data['lens_position']
             )
 
         return gps_data_string
 
@@ -113,6 +113,10 @@
             _new_gps_detailed += ", Load Avgs: " + msg.load_avg_1.toFixed(2) + "/" + msg.load_avg_5.toFixed(2) + "/" + msg.load_avg_15.toFixed(2)
             _new_gps_detailed += ", Disk Usage: " + msg.disk_percent.toFixed(2) +"%"
             _new_gps_detailed += ", CPU Speed: " + msg.cpu_speed +" MHz"
+
+            if(msg.lens_position > -900){
+                _new_gps_detailed += ", Lens Position: " + msg.lens_position
+            }
             $('#detail_gps_telem_data').html(_new_gps_detailed);
 
         }
 
@@ -152,6 +152,9 @@ def handle_gps_telemetry(gps_data):
    _extra_fields['load_avg_15'] = gps_data['load_avg_15']
    _extra_fields['disk_percent'] = gps_data['disk_percent']
 
+    if gps_data['lens_position'] > -999.0:
+        _extra_fields['lens_position'] = gps_data['lens_position']
+
    sondehub.add_telemetry(
        current_callsign + "-Wenet",
        gps_data['timestamp'] + "Z",
 
@@ -97,6 +97,14 @@ sleep 10
 # Set a fixed focus position on a PiCam v3 (NOTE: The Picamv3 focus drifts with temperature - beware!!!)
 # 0.0 = Infinity
 # --lensposition 0.0 \
+# Set a user-defined AutoFocus Window Area, for use with PiCam v3 in Autofocus Mode
+# Must be provided as x,y,w,h , with all values between 0-1.0, where:
+# x: Starting X position of rectangle within frame, as fraction of frame width
+# y: Starting Y position of rectangle within frame, as fraction of frame height
+# w: Width of rectangle, as fraction of frame width
+# h: Height of rectangle, as fraction of frame height
+# e.g:
+# --afwindow 0.25,0.25,0.5,0.5
 
 python3 tx_picamera2_gps.py \
     --rfm98w $SPIDEVICE \
 
@@ -255,7 +255,7 @@ class PacketTX(object):
            print(log_string)
 
 
-    def transmit_gps_telemetry(self, gps_data):
+    def transmit_gps_telemetry(self, gps_data, cam_metadata=None):
        """ Generate and Transmit a GPS Telemetry Packet.
 
        Host platform CPU speed, temperature and load averages are collected and included in this packet too.
 
@@ -264,6 +264,7 @@ class PacketTX(object):
        gps_data: A dictionary, as produced by the UBloxGPS class. It must have the following fields:
            latitude, longitude, altitude, ground_speed, ascent_rate, heading, gpsFix, numSV,
            week, iTOW, leapS, dynamic_model.
+        cam_metadata: An optional dictionary containing metadata about the current camera state.
 
 
        The generated packet format is in accordance with the specification in:
 
@@ -288,9 +289,15 @@ class PacketTX(object):
        except:
            _disk_percent = -1.0
 
+        _lens_position = -999.0
+        if cam_metadata:
+            if 'LensPosition' in cam_metadata:
+                _lens_position = cam_metadata['LensPosition']
+
+
        # Construct the packet
        try:
-            gps_packet = struct.pack(">BHIBffffffBBBffHffff",
+            gps_packet = struct.pack(">BHIBffffffBBBffHfffff",
                1, # Packet ID for the GPS Telemetry Packet.
                gps_data['week'],
                int(gps_data['iTOW']*1000), # Convert the GPS week value to milliseconds, and cast to an int.
 
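Aside (not in the commit): the nested membership check above could be collapsed into a single lookup with a default, assuming cam_metadata is either None or a plain dict:

    # Equivalent sentinel handling for the lens position field.
    _lens_position = (cam_metadata or {}).get('LensPosition', -999.0)
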
@@ -311,7 +318,8 @@ class PacketTX(object):
                _load_avg_1,
                _load_avg_5,
                _load_avg_15,
-                _disk_percent
+                _disk_percent,
+                _lens_position
                )
 
            self.queue_telemetry_packet(gps_packet)
 
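To tie the encode and decode sides together, a small round-trip sketch (illustrative only: the first 20 values are placeholders of the correct integer/float types, not realistic telemetry):

    import struct

    FMT = ">BHIBffffffBBBffHfffff"   # the new 21-field GPS telemetry layout

    # Placeholder values matching the B/H/I/f field types; only the final float
    # (the lens position) matters for this check.
    values = [1, 0, 0, 0] + [0.0]*6 + [0, 0, 0] + [0.0, 0.0] + [0] + [0.0]*4 + [5.2]

    packet = struct.pack(FMT, *values)
    assert len(packet) == 65                 # matches WENET_PACKET_LENGTHS.GPS_TELEMETRY

    decoded = struct.unpack(FMT, packet)
    print(round(decoded[20], 4))             # 5.2 -> becomes gps_data['lens_position'] in the decoder
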
@@ -53,6 +53,7 @@ class WenetPiCamera2(object):
        horizontal_flip = False,
        whitebalance = 'auto',
        lens_position = -1,
+        af_window = None,
        temp_filename_prefix = 'picam_temp',
        debug_ptr = None,
        init_retries = 10
 
@@ -78,7 +79,12 @@ class WenetPiCamera2(object):
        lens_position: Lens Position setting (float), 0.0 = Infinity, 10 = very close.
            Only usable on Pi Camera v3 modules.
            Set to -1 to use continuous autofocus mode.
 
+        af_window: Area in the frame to use for autofocus. Defined as (x,y,w,h), all values between 0-1.0, where
+            x: Starting X position of rectangle within frame, as fraction of frame width
+            y: Starting Y position of rectangle within frame, as fraction of frame height
+            w: Width of rectangle, as fraction of frame width
+            h: Height of rectangle, as fraction of frame height
+            If not provided, the default windowing (approx centre third of width/height) will be used.
        temp_filename_prefix: prefix used for temporary files.
 
        debug_ptr: 'pointer' to a function which can handle debug messages.
 
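Note that af_window is passed as a comma-separated string, not a tuple: it is parsed with .split(",") during camera init (see below). A hedged usage sketch of the two focus-related arguments, with all other constructor arguments omitted (the real call site is in tx_picamera2_gps.py further down):

    # Continuous autofocus, restricted to the centre half of the frame in each axis.
    picam = WenetPiCamera2(lens_position=-1, af_window="0.25,0.25,0.5,0.5")

    # Fixed focus at infinity on a Pi Camera v3 (no AF window needed).
    picam = WenetPiCamera2(lens_position=0.0)
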
@@ -96,8 +102,14 @@ class WenetPiCamera2(object):
        self.horizontal_flip = horizontal_flip
        self.vertical_flip = vertical_flip
        self.lens_position = lens_position
+        self.af_window = af_window
+        self.af_window_rectangle = None # Calculated during init
+        self.autofocus_mode = False
+
+        # Camera metadata capture, so we can poll for camera stats regularly
+        self.capture_in_progress = True
+        self.cam_metadata = None
 
        if whitebalance.lower() in self.wb_lookup:
            self.whitebalance = self.wb_lookup[whitebalance.lower()]
        else:
 
@@ -131,18 +143,37 @@ class WenetPiCamera2(object):
 
        self.camera_properties = self.cam.camera_properties
 
-        self.debug_ptr("Camera Native Resolution: " + str(self.camera_properties['PixelArraySize']))
+        self.debug_message("Camera Native Resolution: " + str(self.camera_properties['PixelArraySize']))
 
+        # Now we can calculate the AF Window information, if we have been asked to do so
+        if self.af_window:
+            _frame_x = self.camera_properties['PixelArraySize'][0]
+            _frame_y = self.camera_properties['PixelArraySize'][1]
+            try:
+                _fields = self.af_window.split(",")
+                if len(_fields) == 4:
+                    _x = int(float(_fields[0])*_frame_x)
+                    _y = int(float(_fields[1])*_frame_y)
+                    _w = int(float(_fields[2])*_frame_x)
+                    _h = int(float(_fields[3])*_frame_y)
+                    self.af_window_rectangle = (_x, _y, _w, _h)
+                    self.debug_message(f"Using AF Window: {str(self.af_window_rectangle)}")
+                else:
+                    self.debug_message("Invalid AF Window definition! Needs 4 fields.")
+
+            except:
+                self.af_window_rectangle = None
+
        # If the user has explicitly specified the transmit image resolution, use it.
        if type(self.tx_resolution_init) == tuple:
            self.tx_resolution = self.tx_resolution_init
-            self.debug_ptr(f"Transmit Resolution set to {str(self.tx_resolution)}")
+            self.debug_message(f"Transmit Resolution set to {str(self.tx_resolution)}")
        # Otherwise, has the user provided a floating point scaling factor?
        elif type(self.tx_resolution_init) == float:
            res_x = 16*int(self.camera_properties['PixelArraySize'][0]*self.tx_resolution_init/16)
            res_y = 16*int(self.camera_properties['PixelArraySize'][1]*self.tx_resolution_init/16)
            self.tx_resolution = (res_x, res_y)
-            self.debug_ptr(f"Transmit Resolution set to {str(self.tx_resolution)}, scaled {self.tx_resolution_init} from native.")
+            self.debug_message(f"Transmit Resolution set to {str(self.tx_resolution)}, scaled {self.tx_resolution_init} from native.")
 
        # Configure camera, including flip settings.
        capture_config = self.cam.create_still_configuration(
 
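A worked example of the fraction-to-pixel conversion above, assuming a Pi Camera v3 whose PixelArraySize reports 4608 x 2592 (check the value logged for your own unit):

    # --afwindow 0.25,0.25,0.5,0.5 -> the centre half of the frame in each axis.
    frame_x, frame_y = 4608, 2592
    fields = "0.25,0.25,0.5,0.5".split(",")
    x = int(float(fields[0]) * frame_x)   # 1152
    y = int(float(fields[1]) * frame_y)   # 648
    w = int(float(fields[2]) * frame_x)   # 2304
    h = int(float(fields[3]) * frame_y)   # 1296
    print((x, y, w, h))                   # (1152, 648, 2304, 1296) -> passed to the camera as AfWindows
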
@@ -154,8 +185,9 @@ class WenetPiCamera2(object):
 
        self.cam.set_controls(
            {'AwbMode': self.whitebalance,
-            'AeMeteringMode': controls.AeMeteringModeEnum.Matrix,
-            'NoiseReductionMode': controls.draft.NoiseReductionModeEnum.Off}
+            'AeMeteringMode': controls.AeMeteringModeEnum.Matrix
+            #'NoiseReductionMode': controls.draft.NoiseReductionModeEnum.Off
+            }
        )
 
        # Set Pi Camera 3 lens position
 
@@ -165,11 +197,18 @@ class WenetPiCamera2(object):
            self.cam.set_controls({"AfMode": controls.AfModeEnum.Manual, "LensPosition": self.lens_position})
        else:
            self.cam.set_controls({"AfMode": controls.AfModeEnum.Continuous})
+            # Set AF Window if defined
+            if self.af_window_rectangle:
+                self.cam.set_controls({"AfWindows": [self.af_window_rectangle]})
 
 
 
+        # In autofocus mode, we need to start the camera now, so it can start figuring out its focus.
+        if 'LensPosition' in self.cam.camera_controls and self.lens_position<0.0:
+            self.debug_message("Enabling camera for image capture")
+            self.cam.start()
+            self.capture_in_progress = False
+
 
        # If we are not in autofocus mode, we start the camera only when we need it.
        # This may help deal with crashes after the camera is running for a long time, and also
 
@@ -212,8 +251,9 @@ class WenetPiCamera2(object):
        # TODO - Maybe expose some of these settings?
        self.cam.set_controls(
            {'AwbMode': self.whitebalance,
-            'AeMeteringMode': controls.AeMeteringModeEnum.Matrix,
-            'NoiseReductionMode': controls.draft.NoiseReductionModeEnum.Off}
+            'AeMeteringMode': controls.AeMeteringModeEnum.Matrix
+            #'NoiseReductionMode': controls.draft.NoiseReductionModeEnum.Off
+            }
        )
 
        # Set Pi Camera 3 lens position, or ensure we are in continuous autofocus mode.
 
@@ -223,7 +263,10 @@ class WenetPiCamera2(object):
            self.cam.set_controls({"AfMode": controls.AfModeEnum.Manual, "LensPosition": self.lens_position})
        else:
            self.cam.set_controls({"AfMode": controls.AfModeEnum.Continuous})
 
+            # Set AF Window if defined
+            if self.af_window_rectangle:
+                print("Set AfWindows")
+                self.cam.set_controls({"AfWindows": [self.af_window_rectangle]})
 
        # If we're not using autofocus, then camera would not have been started yet.
        # Start it now.
 
@@ -231,6 +274,7 @@ class WenetPiCamera2(object):
            try:
                self.debug_message("Enabling camera for image capture")
                self.cam.start()
+                self.capture_in_progress = False
            except Exception as e:
                self.debug_message("Could not enable camera! - " + str(e))
                sleep(1)
 
@@ -239,12 +283,19 @@ class WenetPiCamera2(object):
            sleep(3)
 
        # Attempt to capture a set of images.
+        img_metadata = []
        for i in range(self.num_images):
            self.debug_message("Capturing Image %d of %d" % (i+1,self.num_images))
            # Wrap this in error handling in case we lose the camera for some reason.
 
            try:
-                self.cam.capture_file("%s_%d.jpg" % (self.temp_filename_prefix,i))
+                self.capture_in_progress = True
+                # Capture image
+                metadata = self.cam.capture_file("%s_%d.jpg" % (self.temp_filename_prefix,i))
+                # Save metadata for this frame
+                img_metadata.append(metadata.copy())
+
+                self.capture_in_progress = False
                print(f"Image captured: {time.time()}")
                if self.image_delay > 0:
                    sleep(self.image_delay)
 
@@ -255,6 +306,7 @@ class WenetPiCamera2(object):
 
        if 'LensPosition' not in self.cam.camera_controls or self.lens_position>=0.0:
            self.debug_message("Disabling camera.")
+            self.capture_in_progress = True
            self.cam.stop()
 
        # Otherwise, continue to pick the 'best' image based on filesize.
 
@@ -264,7 +316,14 @@ class WenetPiCamera2(object):
        # Iterate through list of images and get the file sizes.
        for pic in pic_list:
            pic_sizes.append(os.path.getsize(pic))
-        largest_pic = pic_list[pic_sizes.index(max(pic_sizes))]
+        _largest_pic_idx = pic_sizes.index(max(pic_sizes))
+        largest_pic = pic_list[_largest_pic_idx]
+
+        # Report the image pick results.
+        if 'LensPosition' in img_metadata[_largest_pic_idx]:
+            self.debug_message(f"Best Image was #{_largest_pic_idx}, Lens Pos: {img_metadata[_largest_pic_idx]['LensPosition']:.4f}")
+        else:
+            self.debug_message(f"Best Image was #{_largest_pic_idx}")
 
        # Copy best image to target filename.
        self.debug_message("Copying image to storage with filename %s" % filename)
 
@@ -479,6 +538,21 @@ class WenetPiCamera2(object):
        except Exception as e:
            self.debug_message("Error reading CPU Freq - %s" % str(e))
            return -1
 
+    def get_camera_metadata(self):
+        """
+        Query the camera for metadata, but only if a capture is currently not running
+        (otherwise this can block for a while)
+        If a capture is in progress, return the previous data.
+        """
+        try:
+            if self.capture_in_progress == False:
+                if self.cam:
+                    self.cam_metadata = self.cam.capture_metadata()
+
+            return self.cam_metadata
+        except:
+            return None
+
 
 # Basic transmission test script. TODO - Fix this, this is all incorrect..
 
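A hedged sketch of how the new method ties into the telemetry path (the real wiring is in the GPS callback changes further down):

    # Poll camera metadata; the capture_in_progress guard keeps this from blocking mid-capture.
    cam_metadata = picam.get_camera_metadata()      # may be None, or may lack 'LensPosition'

    # transmit_gps_telemetry() falls back to the -999.0 sentinel when the field is absent;
    # the web UI (> -900) and the SondeHub uploader (> -999.0) both skip that sentinel.
    tx.transmit_gps_telemetry(gps_data, cam_metadata)
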
@@ -32,7 +32,8 @@ parser.add_argument("--vflip", action='store_true', default=False, help="Flip ca
 parser.add_argument("--hflip", action='store_true', default=False, help="Flip captured image horizontally.")
 parser.add_argument("--resize", type=float, default=0.5, help="Resize raw image from camera by this factor before transmit (in both X/Y, to nearest multiple of 16 pixels). Default=0.5")
 parser.add_argument("--whitebalance", type=str, default='daylight', help="White Balance setting: Auto, Daylight, Cloudy, Incandescent, Tungesten, Fluorescent, Indoor")
-parser.add_argument("--lensposition", type=float, default=-1.0, help="For PiCam v3, set the lens position. Default: -1 = Autofocus")
+parser.add_argument("--lensposition", type=float, default=-1.0, help="For PiCam v3, set the lens position. Default: -1 = Continuous Autofocus")
+parser.add_argument("--afwindow", type=str, default=None, help="For PiCam v3 Autofocus mode, set the AutoFocus window, x,y,w,h , in fractions of frame size. (Default: None = default)")
 parser.add_argument("-v", "--verbose", action='store_true', default=False, help="Show additional debug info.")
 args = parser.parse_args()
 
@@ -67,6 +68,7 @@ else:
 
 
 # Start up Wenet TX.
+picam = None
 tx = PacketTX.PacketTX(radio=radio, callsign=callsign, log_file="debug.log", udp_listener=55674)
 tx.start_tx()
 
@@ -89,10 +91,17 @@ if args.gps.lower() != 'none':
 
 def handle_gps_data(gps_data):
     """ Handle GPS data passed to us from the UBloxGPS instance """
-    global max_altitude, tx, system_time_set
+    global max_altitude, tx, system_time_set, picam
+
+    # Try and grab metadata from the camera. We send some of this in the telemetry.
+    try:
+        cam_metadata = picam.get_camera_metadata()
+        #print(cam_metadata)
+    except:
+        cam_metadata = None
 
     # Immediately generate and transmit a GPS packet.
-    tx.transmit_gps_telemetry(gps_data)
+    tx.transmit_gps_telemetry(gps_data, cam_metadata)
 
     # If we have GPS fix, update the max altitude field.
     if (gps_data['altitude'] > max_altitude) and (gps_data['gpsFix'] == 3):
 
@@ -201,7 +210,9 @@ picam = WenetPiCamera2.WenetPiCamera2(
     vertical_flip=args.vflip,
     horizontal_flip=args.hflip,
     whitebalance=args.whitebalance,
-    lens_position=args.lensposition)
+    lens_position=args.lensposition,
+    af_window=args.afwindow
+    )
 # .. and start it capturing continuously.
 picam.run(destination_directory="./tx_images/",
     tx = tx,