X-Git-Url: http://git.hungrycats.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=driver%2Fxscreensaver-getimage-file;h=ba1ef308ab9d56e3e90d42293123e624a19fc13e;hb=refs%2Fheads%2Fmaster;hp=bbb1a250e1456e87fb6d641cd03335c5d9fc3896;hpb=8afc01a67be4fbf3f1cc0fce9adf01b5289a21c6;p=xscreensaver

diff --git a/driver/xscreensaver-getimage-file b/driver/xscreensaver-getimage-file
index bbb1a250..ba1ef308 100755
--- a/driver/xscreensaver-getimage-file
+++ b/driver/xscreensaver-getimage-file
@@ -1,5 +1,5 @@
 #!/usr/bin/perl -w
-# Copyright © 2001-2013 Jamie Zawinski <jwz@jwz.org>.
+# Copyright © 2001-2018 Jamie Zawinski <jwz@jwz.org>.
 #
 # Permission to use, copy, modify, distribute, and sell this software and its
 # documentation for any purpose is hereby granted without fee, provided that
@@ -45,10 +45,6 @@ import Fcntl ':mode' unless defined &S_ISUID; # but it is here in Perl 5.8
 # but in Perl 5.10, both of these load, and cause errors!
 # So we have to check for S_ISUID instead of S_ISDIR?  WTF?
 
-use bytes;  # Larry can take Unicode and shove it up his ass sideways.
-            # Perl 5.8.0 causes us to start getting incomprehensible
-            # errors about UTF-8 all over the place without this.
-
 use Digest::MD5 qw(md5_base64);
 
 # Some Linux systems don't install LWP by default!
@@ -57,7 +53,7 @@ BEGIN { eval 'use LWP::Simple;' }
 
 my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.38 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+my ($version) = ('$Revision: 1.52 $' =~ m/\s(\d[.\d]+)\s/s);
 
 my $verbose = 0;
 
@@ -99,17 +95,18 @@ my $good_file_re = '\.(' . join("|", @good_extensions) . ')$';
 #
 my @nondir_extensions = ('ai', 'bmp', 'bz2', 'cr2', 'crw', 'db', 'dmg',
    'eps', 'gz', 'hqx', 'htm', 'html', 'icns', 'ilbm', 'mov',
-   'nef', 'pbm', 'pdf', 'pl', 'ppm', 'ps', 'psd', 'sea', 'sh', 'shtml',
-   'tar', 'tgz', 'thb', 'txt', 'xcf', 'xmp', 'Z', 'zip' );
+   'nef', 'pbm', 'pdf', 'php', 'pl', 'ppm', 'ps', 'psd', 'sea', 'sh',
+   'shtml', 'tar', 'tgz', 'thb', 'txt', 'xcf', 'xmp', 'Z', 'zip' );
 my $nondir_re = '\.(' . join("|", @nondir_extensions) . ')$';
 
 
 # JPEG, GIF, and PNG files that are smaller than this are rejected:
 # this is so that you can use an image directory that contains both big
 # images and thumbnails, and have it only select the big versions.
+# But, if all of your images are smaller than this, all will be rejected.
 #
-my $min_image_width  = 255;
-my $min_image_height = 255;
+my $min_image_width  = 500;
+my $min_image_height = 500;
 
 my @all_files = ();         # list of "good" files we've collected
 my %seen_inodes;            # for breaking recursive symlink loops
@@ -121,6 +118,10 @@ my $stat_count = 0;         # number of files/dirs stat'ed
 my $skip_count_unstat = 0;  # number of files skipped without stat'ing
 my $skip_count_stat = 0;    # number of files skipped after stat
 
+my $config_file = $ENV{HOME} . "/.xscreensaver";
+my $image_directory = undef;
+
+
 sub find_all_files($);
 sub find_all_files($) {
   my ($dir) = @_;
@@ -254,7 +255,7 @@ sub read_cache($) {
     $dd = "$ENV{HOME}/.cache/xscreensaver";
     if (! -d $dd) { mkdir ($dd) || error ("mkdir $dd: $!"); }
     $cache_file_name = "$dd/xscreensaver-getimage.cache"
-  } elsif (-d "$ENV{HOME}/tmp") {       # If ~/.tmp/ exists, use it.
+  } elsif (-d "$ENV{HOME}/tmp") {       # If ~/tmp/ exists, use it.
     $cache_file_name = "$ENV{HOME}/tmp/.xscreensaver-getimage.cache";
   } else {
     $cache_file_name = "$ENV{HOME}/.xscreensaver-getimage.cache";
@@ -360,19 +361,28 @@ sub html_unquote($) {
 
 
+# Figure out what the proxy server should be, either from environment
+# variables or by parsing the output of the (MacOS) program "scutil",
+# which tells us what the system-wide proxy settings are.
+#
 sub set_proxy($) {
   my ($ua) = @_;
 
-  if (!defined($ENV{http_proxy}) && !defined($ENV{HTTP_PROXY})) {
-    my $proxy_data = `scutil --proxy 2>/dev/null`;
-    my ($server) = ($proxy_data =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
-    my ($port)   = ($proxy_data =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
-    if ($server) {
+  my $proxy_data = `scutil --proxy 2>/dev/null`;
+  foreach my $proto ('http', 'https') {
+    my ($server) = ($proxy_data =~ m/\b${proto}Proxy\s*:\s*([^\s]+)/si);
+    my ($port)   = ($proxy_data =~ m/\b${proto}Port\s*:\s*([^\s]+)/si);
+    my ($enable) = ($proxy_data =~ m/\b${proto}Enable\s*:\s*([^\s]+)/si);
+
+    if ($server && $enable) {
       # Note: this ignores the "ExceptionsList".
-      $ENV{http_proxy} = "http://" . $server . ($port ? ":$port" : "") . "/";
-      print STDERR "$progname: MacOS proxy: $ENV{http_proxy}\n"
-        if ($verbose > 2)
-    }
+      my $proto2 = 'http';
+      $ENV{"${proto}_proxy"} = ("${proto2}://" . $server .
+                                ($port ? ":$port" : "") . "/");
+      print STDERR "$progname: MacOS $proto proxy: " .
+                   $ENV{"${proto}_proxy"} . "\n"
+        if ($verbose > 2);
+    }
   }
 
   $ua->env_proxy();
@@ -382,12 +392,50 @@ sub set_proxy($) {
 
 sub init_lwp() {
   if (! defined ($LWP::Simple::ua)) {
     error ("\n\n\tPerl is broken. Do this to repair it:\n" .
-           "\n\tsudo cpan LWP::Simple\n");
+           "\n\tsudo cpan LWP::Simple LWP::Protocol::https Mozilla::CA\n");
   }
   set_proxy ($LWP::Simple::ua);
 }
 
 
+sub sanity_check_lwp() {
+  my $url1 = 'https://www.mozilla.org/';
+  my $url2 =  'http://www.mozilla.org/';
+  my $body = (LWP::Simple::get($url1) || '');
+  if (length($body) < 10240) {
+    my $err = "";
+    $body = (LWP::Simple::get($url2) || '');
+    if (length($body) < 10240) {
+      $err = "Perl is broken: neither HTTP nor HTTPS URLs work.";
+    } else {
+      $err = "Perl is broken: HTTP URLs work but HTTPS URLs don't.";
+    }
+    $err .= "\nMaybe try: sudo cpan -f Mozilla::CA LWP::Protocol::https";
+    $err =~ s/^/\t/gm;
+    error ("\n\n$err\n");
+  }
+}
+
+
+# If the URL does not already end with an extension appropriate for the
+# content-type, add it after a "#" search.
+#
+# This is for when we know the content type of the URL, but the URL is
+# some crazy thing without an extension.  The files on disk need to have
+# proper extensions.
+#
+sub force_extension($$) {
+  my ($url, $ct) = @_;
+  return $url unless (defined($url) && defined($ct));
+  my ($ext) = ($ct =~ m@^image/([-a-z\d]+)@si);
+  return $url unless $ext;
+  $ext = lc($ext);
+  $ext = 'jpg' if ($ext eq 'jpeg');
+  return $url if ($url =~ m/\.$ext$/si);
+  return "$url#.$ext";
+}
+
+
 # Returns a list of the image enclosures in the RSS or Atom feed.
 # Elements of the list are references, [ "url", "guid" ].
 #
@@ -399,15 +447,30 @@ sub parse_feed($) {
 
   $LWP::Simple::ua->agent ("$progname/$version");
   $LWP::Simple::ua->timeout (10);  # bail sooner than the default of 3 minutes
 
+
+  # Half the time, random Linux systems don't have Mozilla::CA installed,
+  # which results in "Can't verify SSL peers without knowing which
+  # Certificate Authorities to trust".
+  #
+  # In xscreensaver-text we just disabled certificate checks.  However,
+  # malicious images really do exist, so for xscreensaver-getimage-file,
+  # let's actually require that SSL be installed properly.
+  #
+
   print STDERR "$progname: loading $url\n" if ($verbose);
   my $body = (LWP::Simple::get($url) || '');
 
-  if ($body !~ m@^<\?xml\s@si) {
+  if ($body !~ m@^\s*<(\?xml|rss)\b@si) {
     # Not an RSS/Atom feed.  Try RSS autodiscovery.
 
     # (Great news, everybody: Flickr no longer provides RSS for "Sets",
     # only for "Photostreams", and only the first 20 images of those.
     # Thanks, assholes.)
 
+    if ($body =~ m/^\s*$/s) {
+      sanity_check_lwp();
+      error ("null response: $url");
+    }
+
     error ("not an RSS or Atom feed, or HTML: $url")
       unless ($body =~ m@<(HEAD|BODY|A|IMG)\b@si);
 
@@ -449,72 +512,83 @@ sub parse_feed($) {
 
       # First look for <link rel="enclosure" href="...">
       #
       if (! $iurl) {
-        $item =~ s!(<link[^<>]*>)!{
-          my $link = $1;
-          my ($rel)  = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
-          my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+        foreach my $link ($item =~ m@<link[^<>]*>@gsi) {
+          last if $iurl;
           my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si);
-
-          if ($rel && lc($rel) eq 'enclosure') {
-            if ($type) {
-              $href = undef unless ($type =~ m@^image/@si);  # omit videos
-            }
-            $iurl = html_unquote($href) if $href;
-          }
-          $link;
-        }!gsexi;
+          my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+          my ($rel)  = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
+          $href = undef unless (lc($rel || '') eq 'enclosure');
+          $href = undef if ($type && $type !~ m@^image/@si);  # omit videos
+          $iurl = html_unquote($href) if $href;
+          $iurl = force_extension ($iurl, $type);
+        }
       }
 
       # Then look for <media:content url="...">
       #
       if (! $iurl) {
-        $item =~ s!(<media:content[^<>]*>)!{
-          my $link = $1;
+        foreach my $link ($item =~ m@<media:content[^<>]*>@gsi) {
+          last if $iurl;
           my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+          my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+          my ($med)  = ($link =~ m/\bMEDIUM\s*=\s*[\"\']?([^<>\'\"]+)/si);
+          $type = 'image/jpeg' if (!$type && lc($med || '') eq 'image');
+          $href = undef if ($type && $type !~ m@^image/@si);  # omit videos
           $iurl = html_unquote($href) if $href;
-          $link;
-        }!gsexi;
+          $iurl = force_extension ($iurl, $type);
+        }
       }
 
      # Then look for <enclosure url="...">
       #
       if (! $iurl) {
-        $item =~ s!(<enclosure[^<>]*>)!{
-          my $link = $1;
-          my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+        foreach my $link ($item =~ m@<enclosure[^<>]*>@gsi) {
+          last if $iurl;
           my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
-          $iurl = html_unquote($href)
-            if ($href && $type && $type =~ m@^image/@si);   # omit videos
-          $link;
-        }!gsexi;
+          my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+          $href = undef if ($type && $type !~ m@^image/@si);  # omit videos
+          $iurl = html_unquote($href) if ($href);
+          $iurl = force_extension ($iurl, $type);
+        }
       }
 
       # Ok, maybe there's an image in the <url> field?
       #
       if (! $iurl) {
-        $item =~ s!((<url\b[^<>]*>)([^<>]*))!{
-          my ($all, $u2) = ($1, $3);
+        foreach my $link ($item =~ m@<url\b[^<>]*>([^<>]*)@gsi) {
+          last if $iurl;
+          my $u2 = $1;
           $iurl = html_unquote($u2) if ($u2 =~ m/$good_file_re/io);
-          $all;
-        }!gsexi;
+          if (! $iurl) {
+            my $u3 = $u2;
+            $u3 =~ s/#.*$//gs;
+            $u3 =~ s/[?&].*$//gs;
+            $iurl = html_unquote($u2) if ($u3 =~ m/$good_file_re/io);
+          }
+        }
       }
 
-      # Then look for <description> ... with an <img src="..."> inside.
+      # Then look for <content:encoded> or <description> ... with an
+      # <img src="..."> inside.  If more than one image, take the first.
       #
-      if (! $iurl) {
-        $item =~ s!(<description[^<>]*>.*?</description>)!{
+      foreach my $t ('content:encoded', 'description') {
+        last if $iurl;
+        foreach my $link ($item =~ m@<$t[^<>]*>(.*?)</$t>@gsi) {
+          last if $iurl;
           my $desc = $1;
-          $desc = html_unquote($desc);
+          if ($desc =~ m@<!\[CDATA\[\s*(.*?)\s*\]\]>@gs) {
+            $desc = $1;
+          } else {
+            $desc = html_unquote($desc);
+          }
           my ($href) = ($desc =~ m@<IMG\b[^<>]*\bSRC=[\"\']?([^\"\'<>]+)@si);
-          $iurl = $href if ($href);
-          $desc;
-        }!gsexi;
+          $iurl = html_unquote($href) if ($href);
+          # If IMG SRC has a bogus extension, pretend it's a JPEG.
+          $iurl = force_extension ($iurl, 'image/jpeg')
+            if ($iurl && $iurl !~ m/$good_file_re/io);
+        }
       }
 
-      # Could also do <content:encoded>, but the above probably covers all
-      # of the real-world possibilities.
-
-
       # Find a unique ID for this image, to defeat image farms.
       # First look for <id>...</id>
       ($id) = ($item =~ m!<ID\b[^<>]*>\s*([^<>]+?)\s*</ID>!si) unless $id;
 
@@ -525,6 +599,8 @@ sub parse_feed($) {
       # Then look for <link> ... </link>
       ($id) = ($item =~ m!<LINK\b[^<>]*>\s*([^<>]+?)\s*</LINK>!si) unless $id;
 
+      # If we only have a GUID or LINK, but it's an image, use that.
+      $iurl = $id if (!$iurl && $id && $id =~ m/$good_file_re/io);
 
       if ($iurl) {
         $id = $iurl unless $id;
@@ -555,6 +631,49 @@ sub md5_file($) {
 }
 
 
+# expands the first URL relative to the second.
+#
+sub expand_url($$) {
+  my ($url, $base) = @_;
+
+  $url =~ s/^\s+//gs;                   # lose whitespace at front and back
+  $url =~ s/\s+$//gs;
+
+  if (! ($url =~ m/^[a-z]+:/)) {
+
+    $base =~ s@(\#.*)$@@;               # strip anchors
+    $base =~ s@(\?.*)$@@;               # strip arguments
+    $base =~ s@/[^/]*$@/@;              # take off trailing file component
+
+    my $tail = '';
+    if ($url =~ s@(\#.*)$@@) { $tail = $1; }         # save anchors
+    if ($url =~ s@(\?.*)$@@) { $tail = "$1$tail"; }  # save arguments
+
+    my $base2 = $base;
+
+    $base2 =~ s@^([a-z]+:/+[^/]+)/.*@$1@   # if url is an absolute path
+      if ($url =~ m@^/@);
+
+    my $ourl = $url;
+
+    $url = $base2 . $url;
+    $url =~ s@/\./@/@g;                    # expand "."
+    1 while ($url =~ s@/[^/]+/\.\./@/@s);  # expand ".."
+
+    $url .= $tail;                         # put anchors/args back
+
+    print STDERR "$progname: relative URL: $ourl --> $url\n"
+      if ($verbose > 1);
+
+  } else {
+    print STDERR "$progname: absolute URL: $url\n"
+      if ($verbose > 2);
+  }
+
+  return $url;
+}
+
+
 # Given the URL of an image, download it into the given directory
 # and return the file name.
 #
@@ -564,7 +683,8 @@ sub download_image($$$) {
 
   my $url2 = $url;
   $url2 =~ s/\#.*$//s;                 # Omit search terms after file extension
   $url2 =~ s/\?.*$//s;
-  my ($ext) = ($url2 =~ m@\.([a-z\d]+)$@si);
+  my ($ext) = ($url  =~ m@\.([a-z\d]+)$@si);
+  ($ext) = ($url2 =~ m@\.([a-z\d]+)$@si) unless $ext;
 
   # If the feed hasn't put a sane extension on their URLs, nothing's going
   # to work. This code assumes that file names have extensions, even the
@@ -578,7 +698,8 @@ sub download_image($$$) {
 
   # Don't bother downloading files that we will reject anyway.
   #
-  if (! ($url2 =~ m/$good_file_re/io)) {
+  if (! ($url  =~ m/$good_file_re/io ||
+         $url2 =~ m/$good_file_re/io)) {
     print STDERR "$progname: skipping non-image URL: $url\n"
      if ($verbose > 1);
     return undef;
@@ -612,6 +733,10 @@ sub download_image($$$) {
   #      _q    square, 150x150
   #      _s    square, 75x75
   #
+  # Note: if we wanted to get the _k or _o version instead of the _b or _h
+  # version, we'd need to crack the DRM -- which is easy: see crack_secret
+  # in "https://www.jwz.org/hacks/galdown".
+  #
   $url =~ s@_[sqtmnzc](\.[a-z]+)$@_b$1@si
     if ($url =~ m@^https?://[^/?#&]*?flickr\.com/@si);
 
@@ -619,6 +744,8 @@ sub download_image($$$) {
     if ($verbose > 1);
   init_lwp();
   $LWP::Simple::ua->agent ("$progname/$version");
+
+  $url =~ s/\#.*$//s;                  # Omit search terms
   my $status = LWP::Simple::mirror ($url, "$dir/$file");
   if (!LWP::Simple::is_success ($status)) {
     print STDERR "$progname: error $status: $url\n";  # keep going
@@ -643,7 +770,7 @@ sub mirror_feed($) {
     if (! -d $dir) { mkdir ($dir) || error ("mkdir $dir: $!"); }
     $dir .= "/feeds";
     if (! -d $dir) { mkdir ($dir) || error ("mkdir $dir: $!"); }
-  } elsif (-d "$ENV{HOME}/tmp") {      # If ~/.tmp/ exists, use it.
+  } elsif (-d "$ENV{HOME}/tmp") {      # If ~/tmp/ exists, use it.
     $dir = "$ENV{HOME}/tmp/.xscreensaver-feeds";
   } else {
     $dir = "$ENV{HOME}/.xscreensaver-feeds";
@@ -698,7 +825,8 @@ sub mirror_feed($) {
     closedir $dirh;
 
     if ($count <= 0) {
-      print STDERR "$progname: no files in cache of $url\n" if ($verbose);
+      print STDERR "$progname: no image files in cache of $url\n"
+        if ($verbose);
       $poll_p = 1;
     }
   }
@@ -723,8 +851,16 @@ sub mirror_feed($) {
     my @urls = parse_feed ($url);
     print STDERR "$progname: " . ($#urls + 1) . " images\n"
       if ($verbose > 1);
+    my %seen_src_urls;
     foreach my $p (@urls) {
       my ($furl, $id) = @$p;
+      $furl = expand_url ($furl, $url);
+
+      # No need to download the same image twice, even if it was in the feed
+      # multiple times under different GUIDs.
+      next if ($seen_src_urls{$furl});
+      $seen_src_urls{$furl} = 1;
+
       my $f = download_image ($furl, $id, $dir);
       next unless $f;
       $files{$f} = 1;                  # Got it, don't delete
@@ -836,25 +972,40 @@ sub find_random_file($) {
     write_cache ($dir);
 
   if ($#all_files < 0) {
-    print STDERR "$progname: no files in $dir\n";
+    print STDERR "$progname: no image files in $dir\n";
     exit 1;
   }
 
   my $max_tries = 50;
-  for (my $i = 0; $i < $max_tries; $i++) {
-
-    my $n = int (rand ($#all_files + 1));
-    my $file = $all_files[$n];
-    if (large_enough_p ($file)) {
-      if (! $url) {
-        $file =~ s@^\Q$dir/@@so || die;  # remove $dir from front
+  my $total_files = @all_files;
+  my $sparse_p = ($total_files < 20);
+
+  # If the directory has a lot of files in it:
+  #   Make a pass through looking for hirez files (assume some are thumbs);
+  #   If we found none, then, select any other file at random.
+  # Otherwise if there are a small number of files:
+  #   Just select one at random (in case there's like, just one hirez).
+
+  for (my $check_size_p = $sparse_p ? 0 : 1;
+       $check_size_p >= 0; $check_size_p--) {
+
+    for (my $i = 0; $i < $max_tries; $i++) {
+      my $n = int (rand ($total_files));
+      my $file = $all_files[$n];
+      if (!$check_size_p || large_enough_p ($file)) {
+        if (! $url) {
+          $file =~ s@^\Q$dir/@@so || die;  # remove $dir from front
+        }
+        return $file;
       }
-      return $file;
    }
   }
 
-  print STDERR "$progname: no suitable images in $dir " .
-               "(after $max_tries tries)\n";
+  print STDERR "$progname: no suitable images in " . ($url || $dir) . " -- " .
+               ($total_files <= $max_tries
+                ? "all $total_files images"
+                : "$max_tries of $total_files images") .
+               " are smaller than ${min_image_width}x${min_image_height}.\n";
 
   # If we got here, blow away the cache.  Maybe it's stale.
   unlink $cache_file_name if $cache_file_name;
 
@@ -999,6 +1150,81 @@ sub image_file_size($) {
 }
 
 
+# Reads the prefs we use from ~/.xscreensaver
+#
+sub get_x11_prefs() {
+  my $got_any_p = 0;
+
+  if (open (my $in, '<', $config_file)) {
+    print STDERR "$progname: reading $config_file\n" if ($verbose > 1);
+    local $/ = undef;  # read entire file
+    my $body = <$in>;
+    close $in;
+    $got_any_p = get_x11_prefs_1 ($body);
+
+  } elsif ($verbose > 1) {
+    print STDERR "$progname: $config_file: $!\n";
+  }
+
+  if (! $got_any_p && defined ($ENV{DISPLAY})) {
+    # We weren't able to read settings from the .xscreensaver file.
+    # Fall back to any settings in the X resource database
+    # (/usr/X11R6/lib/X11/app-defaults/XScreenSaver)
+    #
+    print STDERR "$progname: reading X resources\n" if ($verbose > 1);
+    my $body = `appres XScreenSaver xscreensaver -1`;
+    $got_any_p = get_x11_prefs_1 ($body);
+  }
+}
+
+
+sub get_x11_prefs_1($) {
+  my ($body) = @_;
+
+  my $got_any_p = 0;
+  $body =~ s@\\\n@@gs;
+  $body =~ s@^[ \t]*#[^\n]*$@@gm;
+
+  if ($body =~ m/^[.*]*imageDirectory:[ \t]*([^\s]+)\s*$/im) {
+    $image_directory = $1;
+    $got_any_p = 1;
+  }
+  return $got_any_p;
+}
+
+
+sub get_cocoa_prefs($) {
+  my ($id) = @_;
+  print STDERR "$progname: reading Cocoa prefs: \"$id\"\n" if ($verbose > 1);
+  my $v = get_cocoa_pref_1 ($id, "imageDirectory");
+  $v = '~/Pictures' unless defined ($v);  # Match default in XScreenSaverView
+  $image_directory = $v if defined ($v);
+}
+
+
+sub get_cocoa_pref_1($$) {
+  my ($id, $key) = @_;
+  # make sure there's nothing stupid/malicious in either string.
+  $id  =~ s/[^-a-z\d. ]/_/gsi;
+  $key =~ s/[^-a-z\d. ]/_/gsi;
+  my $cmd = "defaults -currentHost read \"$id\" \"$key\"";
+
+  print STDERR "$progname: executing $cmd\n"
+    if ($verbose > 3);
+
+  my $val = `$cmd 2>/dev/null`;
+  $val =~ s/^\s+//s;
+  $val =~ s/\s+$//s;
+
+  print STDERR "$progname: Cocoa: $id $key = \"$val\"\n"
+    if ($verbose > 2);
+
+  $val = undef if ($val =~ m/^$/s);
+
+  return $val;
+}
+
+
 sub error($) {
   my ($err) = @_;
   print STDERR "$progname: $err\n";
@@ -1006,7 +1232,7 @@ sub error($) {
 }
 
 sub usage() {
-  print STDERR "usage: $progname [--verbose] directory-or-feed-url\n\n" .
+  print STDERR "usage: $progname [--verbose] [ directory-or-feed-url ]\n\n" .
   "       Prints the name of a randomly-selected image file.  The directory\n" .
   "       is searched recursively.  Images smaller than " .
          "${min_image_width}x${min_image_height} are excluded.\n" .
@@ -1018,7 +1244,8 @@ sub usage() {
 }
 
 sub main() {
-  my $dir = undef;
+  my $cocoa_id = undef;
+  my $abs_p = 0;
 
   while ($_ = $ARGV[0]) {
     shift @ARGV;
@@ -1029,28 +1256,59 @@ sub main() {
     elsif (m/^--?no-spotlight$/s) { $use_spotlight_p = 0; }
     elsif (m/^--?cache$/s)     { $cache_p = 1; }
     elsif (m/^--?no-?cache$/s) { $cache_p = 0; }
+    elsif (m/^--?cocoa$/)      { $cocoa_id = shift @ARGV; }
+    elsif (m/^--?abs(olute)?$/){ $abs_p = 1; }
     elsif (m/^-./) { usage; }
-    elsif (!defined($dir)) { $dir = $_; }
+    elsif (!defined($image_directory)) { $image_directory = $_; }
     else { usage; }
   }
 
-  usage unless (defined($dir));
+  # Most hacks (X11 and Cocoa) pass a --directory value on the command line,
+  # but if they don't, look it up from the resources.  Currently this only
+  # happens with "glitchpeg" which invokes xscreensaver-getimage-file
+  # directly instead of going through the traditional path.
+  #
+  if (! $image_directory) {
+    if (!defined ($cocoa_id)) {
+      # see OSX/XScreenSaverView.m
+      $cocoa_id = $ENV{XSCREENSAVER_CLASSPATH};
+    }
 
-  $dir =~ s@^feed:@http:@si;
+    if (defined ($cocoa_id)) {
+      get_cocoa_prefs($cocoa_id);
+      error ("no imageDirectory in $cocoa_id") unless $image_directory;
+    } else {
+      get_x11_prefs();
+      error ("no imageDirectory in X11 resources") unless $image_directory;
+    }
+  }
+
+  usage unless (defined($image_directory));
+
+  $image_directory =~ s@^feed:@http:@si;
 
-  if ($dir =~ m/^https?:/si) {
+  if ($image_directory =~ m/^https?:/si) {
     # ok
   } else {
-    $dir =~ s@^~/@$ENV{HOME}/@s;       # allow literal "~/"
-    $dir =~ s@/+$@@s;                  # omit trailing /
+    $image_directory =~ s@^~/@$ENV{HOME}/@s;   # allow literal "~/"
+    $image_directory =~ s@/+$@@s;              # omit trailing /
 
-    if (! -d $dir) {
-      print STDERR "$progname: $dir: not a directory or URL\n";
+    if (! -d $image_directory) {
+      print STDERR "$progname: $image_directory not a directory or URL\n";
      usage;
    }
   }
 
-  my $file = find_random_file ($dir);
+  my $file = find_random_file ($image_directory);
+
+  # With --absolute return fully qualified paths instead of relative to --dir.
+  if ($abs_p &&
+      $file !~ m@^/@ &&
+      $image_directory =~ m@^/@s) {
+    $file = "$image_directory/$file";
+    $file =~ s@//+@/@gs;
+  }
+
   print STDOUT "$file\n";
 }
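
A note on the new expand_url() helper above: the sketch below reproduces its
core path arithmetic as a standalone program, so the behavior can be
spot-checked outside of xscreensaver.  It is a simplification (the
anchor/argument bookkeeping and the verbose logging of the real function are
omitted), and the example URLs and the name expand_url_sketch are made up for
illustration.

    #!/usr/bin/perl -w
    # Minimal sketch of the relative-URL expansion done by expand_url().
    use strict;

    sub expand_url_sketch($$) {
      my ($url, $base) = @_;
      return $url if ($url =~ m/^[a-z]+:/);  # already absolute: unchanged
      $base =~ s@[#?].*$@@s;                 # strip anchors and arguments
      $base =~ s@/[^/]*$@/@;                 # drop trailing file component
      $base =~ s@^([a-z]+:/+[^/]+)/.*@$1@    # absolute path: keep scheme+host
        if ($url =~ m@^/@);
      $url = $base . $url;
      $url =~ s@/\./@/@g;                    # collapse "/./"
      1 while ($url =~ s@/[^/]+/\.\./@/@s);  # collapse "dir/../"
      return $url;
    }

    # Feed enclosures are often given relative to the feed's own URL:
    print expand_url_sketch ('../img/photo.jpg',
                             'http://example.com/feeds/rss.xml') . "\n";
      # --> http://example.com/img/photo.jpg
    print expand_url_sketch ('/icons/photo.jpg',
                             'http://example.com/feeds/rss.xml') . "\n";
      # --> http://example.com/icons/photo.jpg

This is what lets mirror_feed() pass every enclosure through
expand_url ($furl, $url) before downloading, so feeds that emit relative
image paths still resolve against the feed's own location.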
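
The force_extension() helper can be exercised the same way.  This
self-contained copy of it (the example URLs are again hypothetical) shows the
"#.ext" tagging it performs; the "#..." part never reaches the server, since
download_image() strips it before mirroring -- it only gives the cached file
on disk a usable extension.

    #!/usr/bin/perl -w
    # Standalone copy of force_extension(): tag a usable image extension
    # onto a URL, after a "#", when the URL itself doesn't end with one.
    use strict;

    sub force_extension_sketch($$) {
      my ($url, $ct) = @_;
      return $url unless (defined($url) && defined($ct));
      my ($ext) = ($ct =~ m@^image/([-a-z\d]+)@si);  # "image/jpeg" -> "jpeg"
      return $url unless $ext;
      $ext = lc($ext);
      $ext = 'jpg' if ($ext eq 'jpeg');
      return $url if ($url =~ m/\.$ext$/si);         # extension already sane
      return "$url#.$ext";
    }

    print force_extension_sketch ('http://example.com/img?id=123',
                                  'image/jpeg') . "\n";
      # --> http://example.com/img?id=123#.jpg
    print force_extension_sketch ('http://example.com/a.png',
                                  'image/png') . "\n";
      # --> http://example.com/a.png  (unchanged)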