#!/usr/bin/perl -w
-# Copyright © 2001-2012 Jamie Zawinski <jwz@jwz.org>.
+# Copyright © 2001-2018 Jamie Zawinski <jwz@jwz.org>.
#
# Permission to use, copy, modify, distribute, and sell this software and its
# documentation for any purpose is hereby granted without fee, provided that
# but in Perl 5.10, both of these load, and cause errors!
# So we have to check for S_ISUID instead of S_ISDIR? WTF?
-use bytes; # Larry can take Unicode and shove it up his ass sideways.
- # Perl 5.8.0 causes us to start getting incomprehensible
- # errors about UTF-8 all over the place without this.
-
use Digest::MD5 qw(md5_base64);
# Some Linux systems don't install LWP by default!
my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.33 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+my ($version) = ('$Revision: 1.52 $' =~ m/\s(\d[.\d]+)\s/s);
my $verbose = 0;
#
my @nondir_extensions = ('ai', 'bmp', 'bz2', 'cr2', 'crw', 'db',
'dmg', 'eps', 'gz', 'hqx', 'htm', 'html', 'icns', 'ilbm', 'mov',
- 'nef', 'pbm', 'pdf', 'pl', 'ppm', 'ps', 'psd', 'sea', 'sh', 'shtml',
- 'tar', 'tgz', 'thb', 'txt', 'xcf', 'xmp', 'Z', 'zip' );
+ 'nef', 'pbm', 'pdf', 'php', 'pl', 'ppm', 'ps', 'psd', 'sea', 'sh',
+ 'shtml', 'tar', 'tgz', 'thb', 'txt', 'xcf', 'xmp', 'Z', 'zip' );
my $nondir_re = '\.(' . join("|", @nondir_extensions) . ')$';
# JPEG, GIF, and PNG files that are smaller than this are rejected:
# this is so that you can use an image directory that contains both big
# images and thumbnails, and have it only select the big versions.
+# But, if all of your images are smaller than this, all will be rejected.
#
-my $min_image_width = 255;
-my $min_image_height = 255;
+my $min_image_width = 500;
+my $min_image_height = 500;
my @all_files = (); # list of "good" files we've collected
my %seen_inodes; # for breaking recursive symlink loops
my $skip_count_unstat = 0; # number of files skipped without stat'ing
my $skip_count_stat = 0; # number of files skipped after stat
+my $config_file = $ENV{HOME} . "/.xscreensaver";
+my $image_directory = undef;
+
+
sub find_all_files($);
sub find_all_files($) {
my ($dir) = @_;
my $dd = "$ENV{HOME}/Library/Caches"; # MacOS location
if (-d $dd) {
$cache_file_name = "$dd/org.jwz.xscreensaver.getimage.cache";
- } elsif (-d "$ENV{HOME}/tmp") {
+ } elsif (-d "$ENV{HOME}/.cache") { # Gnome "FreeDesktop XDG" location
+ $dd = "$ENV{HOME}/.cache/xscreensaver";
+ if (! -d $dd) { mkdir ($dd) || error ("mkdir $dd: $!"); }
+ $cache_file_name = "$dd/xscreensaver-getimage.cache"
+ } elsif (-d "$ENV{HOME}/tmp") { # If ~/tmp/ exists, use it.
$cache_file_name = "$ENV{HOME}/tmp/.xscreensaver-getimage.cache";
} else {
$cache_file_name = "$ENV{HOME}/.xscreensaver-getimage.cache";
print $cache_fd "$dir\n";
foreach (@all_files) {
my $f = $_; # stupid Perl. do this to avoid modifying @all_files!
- $f =~ s@^\Q$dir\L/@@so || die; # remove $dir from front
+ $f =~ s@^\Q$dir/@@so || die; # remove $dir from front
print $cache_fd "$f\n";
}
}
sub html_unquote($) {
my ($h) = @_;
+
+ # This only needs to handle entities that occur in RSS, not full HTML.
my %ent = ( 'amp' => '&', 'lt' => '<', 'gt' => '>',
'quot' => '"', 'apos' => "'" );
$h =~ s/(&(\#)?([[:alpha:]\d]+);?)/
+# Figure out what the proxy server should be, either from environment
+# variables or by parsing the output of the (MacOS) program "scutil",
+# which tells us what the system-wide proxy settings are.
+#
sub set_proxy($) {
my ($ua) = @_;
- if (!defined($ENV{http_proxy}) && !defined($ENV{HTTP_PROXY})) {
- my $proxy_data = `scutil --proxy 2>/dev/null`;
- my ($server) = ($proxy_data =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
- my ($port) = ($proxy_data =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
- if ($server) {
+ my $proxy_data = `scutil --proxy 2>/dev/null`;
+ foreach my $proto ('http', 'https') {
+ my ($server) = ($proxy_data =~ m/\b${proto}Proxy\s*:\s*([^\s]+)/si);
+ my ($port) = ($proxy_data =~ m/\b${proto}Port\s*:\s*([^\s]+)/si);
+ my ($enable) = ($proxy_data =~ m/\b${proto}Enable\s*:\s*([^\s]+)/si);
+
+ if ($server && $enable) {
# Note: this ignores the "ExceptionsList".
- $ENV{http_proxy} = "http://" . $server . ($port ? ":$port" : "") . "/";
- print STDERR "$progname: MacOS proxy: $ENV{http_proxy}\n"
- if ($verbose > 2)
- }
+ my $proto2 = 'http';
+ $ENV{"${proto}_proxy"} = ("${proto2}://" . $server .
+ ($port ? ":$port" : "") . "/");
+ print STDERR "$progname: MacOS $proto proxy: " .
+ $ENV{"${proto}_proxy"} . "\n"
+ if ($verbose > 2);
+ }
}
$ua->env_proxy();
# Make sure LWP::Simple actually loaded, and configure its proxy settings.
# Aborts with installation instructions if it did not.
#
sub init_lwp() {
  error ("\n\n\tPerl is broken. Do this to repair it:\n" .
         "\n\tsudo cpan LWP::Simple LWP::Protocol::https Mozilla::CA\n")
    unless defined ($LWP::Simple::ua);
  set_proxy ($LWP::Simple::ua);
}
# Verify that LWP can fetch both HTTP and HTTPS URLs, and abort with a
# repair hint if it can't.  Used to distinguish "broken SSL stack" from
# "the feed is empty" when a download comes back blank.
#
sub sanity_check_lwp() {
  my $https_url = 'https://www.mozilla.org/';
  my $http_url  = 'http://www.mozilla.org/';

  # A real fetch of this page is far larger than 10K; anything smaller
  # (or empty) means the request effectively failed.
  return if (length(LWP::Simple::get($https_url) || '') >= 10240);

  my $msg = (length(LWP::Simple::get($http_url) || '') < 10240
             ? "Perl is broken: neither HTTP nor HTTPS URLs work."
             : "Perl is broken: HTTP URLs work but HTTPS URLs don't.");
  $msg .= "\nMaybe try: sudo cpan -f Mozilla::CA LWP::Protocol::https";
  $msg =~ s/^/\t/gm;
  error ("\n\n$msg\n");
}
+
+
# If the URL's file name does not already carry an extension matching the
# given image content-type, append one after a "#" fragment marker.
#
# This is for when we know the content type of the URL, but the URL is
# some crazy thing without an extension: files cached on disk need real
# extensions.  Returns the URL unchanged when either argument is undef,
# when the content-type is not image/*, or when a matching extension is
# already present.
#
sub force_extension($$) {
  my ($u, $content_type) = @_;
  return $u if (!defined($u) || !defined($content_type));

  my ($suffix) = ($content_type =~ m@^image/([-a-z\d]+)@si);
  return $u if (!$suffix);

  $suffix = lc($suffix);
  $suffix = 'jpg' if ($suffix eq 'jpeg');   # canonical form

  return ($u =~ m/\.$suffix$/si
          ? $u
          : "$u#.$suffix");
}
+
+
# Returns a list of the image enclosures in the RSS or Atom feed.
# Elements of the list are references, [ "url", "guid" ].
#
$LWP::Simple::ua->agent ("$progname/$version");
$LWP::Simple::ua->timeout (10); # bail sooner than the default of 3 minutes
+
+ # Half the time, random Linux systems don't have Mozilla::CA installed,
+  # which results in "Can't verify SSL peers without knowing which
+ # Certificate Authorities to trust".
+ #
+ # In xscreensaver-text we just disabled certificate checks. However,
+ # malicious images really do exist, so for xscreensaver-getimage-file,
+ # let's actually require that SSL be installed properly.
+
+ print STDERR "$progname: loading $url\n" if ($verbose);
my $body = (LWP::Simple::get($url) || '');
- if ($body !~ m@^<\?xml\s@si) {
+ if ($body !~ m@^\s*<(\?xml|rss)\b@si) {
# Not an RSS/Atom feed. Try RSS autodiscovery.
+ # (Great news, everybody: Flickr no longer provides RSS for "Sets",
+ # only for "Photostreams", and only the first 20 images of those.
+ # Thanks, assholes.)
+
+ if ($body =~ m/^\s*$/s) {
+ sanity_check_lwp();
+ error ("null response: $url");
+ }
+
error ("not an RSS or Atom feed, or HTML: $url")
unless ($body =~ m@<(HEAD|BODY|A|IMG)\b@si);
$p =~ m! \b HREF \s* = \s* ['"] ( [^<>'"]+ ) !six
) {
my $u2 = html_unquote ($1);
+ if ($u2 =~ m!^/!s) {
+ my ($h) = ($url =~ m!^([a-z]+://[^/]+)!si);
+ $u2 = "$h$u2";
+ }
print STDERR "$progname: found feed: $u2\n"
if ($verbose);
return parse_feed ($u2);
# First look for <link rel="enclosure" href="...">
#
if (! $iurl) {
- $item =~ s!(<LINK[^<>]*>)!{
- my $link = $1;
- my ($rel) = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
- my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ foreach my $link ($item =~ m@<LINK[^<>]*>@gsi) {
+ last if $iurl;
my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si);
-
- if ($rel && lc($rel) eq 'enclosure') {
- if ($type) {
- $href = undef unless ($type =~ m@^image/@si); # omit videos
- }
- $iurl = html_unquote($href) if $href;
- }
- $link;
- }!gsexi;
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($rel) = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ $href = undef unless (lc($rel || '') eq 'enclosure');
+ $href = undef if ($type && $type !~ m@^image/@si); # omit videos
+ $iurl = html_unquote($href) if $href;
+ $iurl = force_extension ($iurl, $type);
+ }
}
# Then look for <media:content url="...">
#
if (! $iurl) {
- $item =~ s!(<MEDIA:CONTENT[^<>]*>)!{
- my $link = $1;
+ foreach my $link ($item =~ m@<MEDIA:CONTENT[^<>]*>@gsi) {
+ last if $iurl;
my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ my ($med) = ($link =~ m/\bMEDIUM\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ $type = 'image/jpeg' if (!$type && lc($med || '') eq 'image');
+ $href = undef if ($type && $type !~ m@^image/@si); # omit videos
$iurl = html_unquote($href) if $href;
- $link;
- }!gsexi;
+ $iurl = force_extension ($iurl, $type);
+ }
}
# Then look for <enclosure url="..."/>
#
if (! $iurl) {
- $item =~ s!(<ENCLOSURE[^<>]*>)!{
- my $link = $1;
- my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ foreach my $link ($item =~ m@<ENCLOSURE[^<>]*>@gsi) {
+ last if $iurl;
my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
- $iurl = html_unquote($href)
- if ($href && $type && $type =~ m@^image/@si); # omit videos
- $link;
- }!gsexi;
+ my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+ $href = undef if ($type && $type !~ m@^image/@si); # omit videos
+ $iurl = html_unquote($href) if ($href);
+ $iurl = force_extension ($iurl, $type);
+ }
}
# Ok, maybe there's an image in the <url> field?
#
if (! $iurl) {
- $item =~ s!((<URL\b[^<>]*>)([^<>]*))!{
- my ($all, $u2) = ($1, $3);
+ foreach my $link ($item =~ m@<URL\b[^<>]*>([^<>]*)@gsi) {
+ last if $iurl;
+ my $u2 = $1;
$iurl = html_unquote($u2) if ($u2 =~ m/$good_file_re/io);
- $all;
- }!gsexi;
+ if (! $iurl) {
+ my $u3 = $u2;
+ $u3 =~ s/#.*$//gs;
+ $u3 =~ s/[?&].*$//gs;
+ $iurl = html_unquote($u2) if ($u3 =~ m/$good_file_re/io);
+ }
+ }
}
- # Then look for <description>... with an <img href="..."> inside.
+ # Then look for <content:encoded> or <description>... with an
+ # <img src="..."> inside. If more than one image, take the first.
#
- if (! $iurl) {
- $item =~ s!(<description[^<>]*>.*?</description>)!{
+ foreach my $t ('content:encoded', 'description') {
+ last if $iurl;
+ foreach my $link ($item =~ m@<$t[^<>]*>(.*?)</$t>@gsi) {
+ last if $iurl;
my $desc = $1;
- $desc = html_unquote($desc);
+ if ($desc =~ m@<!\[CDATA\[\s*(.*?)\s*\]\]>@gs) {
+ $desc = $1;
+ } else {
+ $desc = html_unquote($desc);
+ }
my ($href) = ($desc =~ m@<IMG[^<>]*\bSRC=[\"\']?([^\"\'<>]+)@si);
- $iurl = $href if ($href);
- $desc;
- }!gsexi;
+ $iurl = html_unquote($href) if ($href);
+ # If IMG SRC has a bogus extension, pretend it's a JPEG.
+ $iurl = force_extension ($iurl, 'image/jpeg')
+ if ($iurl && $iurl !~ m/$good_file_re/io);
+ }
}
- # Could also do <content:encoded>, but the above probably covers all
- # of the real-world possibilities.
-
-
# Find a unique ID for this image, to defeat image farms.
# First look for <id>...</id>
($id) = ($item =~ m!<ID\b[^<>]*>\s*([^<>]+?)\s*</ID>!si) unless $id;
# Then look for <link> ... </link>
($id) = ($item =~ m!<LINK\b[^<>]*>\s*([^<>]+?)\s*</LINK>!si) unless $id;
+ # If we only have a GUID or LINK, but it's an image, use that.
+ $iurl = $id if (!$iurl && $id && $id =~ m/$good_file_re/io);
if ($iurl) {
$id = $iurl unless $id;
}
# Expands the first URL relative to the second.  Absolute URLs (those
# with a scheme) are only trimmed of surrounding whitespace; relative
# URLs are resolved against the base, preserving any query string and
# fragment, and collapsing "." and ".." path components.
#
sub expand_url($$) {
  my ($url, $base) = @_;

  $url =~ s/^\s+//gs;   # lose whitespace at front and back
  $url =~ s/\s+$//gs;

  if ($url =~ m/^[a-z]+:/) {
    print STDERR "$progname: absolute URL: $url\n"
      if ($verbose > 2);
    return $url;
  }

  $base =~ s@(\#.*)$@@;    # strip anchors
  $base =~ s@(\?.*)$@@;    # strip arguments
  $base =~ s@/[^/]*$@/@;   # take off trailing file component

  my $suffix = '';
  if ($url =~ s@(\#.*)$@@) { $suffix = $1; }            # save anchors
  if ($url =~ s@(\?.*)$@@) { $suffix = "$1$suffix"; }   # save arguments

  my $prefix = $base;
  $prefix =~ s@^([a-z]+:/+[^/]+)/.*@$1@   # if url is an absolute path
    if ($url =~ m@^/@);

  my $rel = $url;
  $url = $prefix . $url;
  $url =~ s@/\./@/@g;                      # expand "."
  1 while ($url =~ s@/[^/]+/\.\./@/@s);    # expand ".."

  $url .= $suffix;                         # put anchors/args back

  print STDERR "$progname: relative URL: $rel --> $url\n"
    if ($verbose > 1);

  return $url;
}
+
+
# Given the URL of an image, download it into the given directory
# and return the file name.
#
sub download_image($$$) {
my ($url, $uid, $dir) = @_;
- my ($ext) = ($url =~ m@\.([a-z\d]+)$@si);
+ my $url2 = $url;
+ $url2 =~ s/\#.*$//s; # Omit search terms after file extension
+ $url2 =~ s/\?.*$//s;
+ my ($ext) = ($url =~ m@\.([a-z\d]+)$@si);
+ ($ext) = ($url2 =~ m@\.([a-z\d]+)$@si) unless $ext;
+
+ # If the feed hasn't put a sane extension on their URLs, nothing's going
+ # to work. This code assumes that file names have extensions, even the
+ # ones in the cache directory.
+ #
+ if (! $ext) {
+ print STDERR "$progname: skipping extensionless URL: $url\n"
+ if ($verbose > 1);
+ return undef;
+ }
+
+ # Don't bother downloading files that we will reject anyway.
+ #
+ if (! ($url =~ m/$good_file_re/io ||
+ $url2 =~ m/$good_file_re/io)) {
+ print STDERR "$progname: skipping non-image URL: $url\n"
+ if ($verbose > 1);
+ return undef;
+ }
+
my $file = md5_file ($uid);
$file .= '.' . lc($ext) if $ext;
# Special-case kludge for Flickr:
# Their RSS feeds sometimes include only the small versions of the images.
- # So if the URL ends in "s" (75x75), "t" (100x100) or "m" (240x240),then
- # munge it to be "b" (1024x1024).
+ # So if the URL ends in one of the "small-size" letters, change it to "b".
#
- $url =~ s@_[stm](\.[a-z]+)$@_b$1@si
+ # _o orig, 1600 +
+ # _k large, 2048 max
+ # _h large, 1600 max
+ # _b large, 1024 max
+ # _c medium, 800 max
+ # _z medium, 640 max
+ # "" medium, 500 max
+ # _n small, 320 max
+ # _m small, 240 max
+ # _t thumb, 100 max
+ # _q square, 150x150
+ # _s square, 75x75
+ #
+ # Note: if we wanted to get the _k or _o version instead of the _b or _h
+ # version, we'd need to crack the DRM -- which is easy: see crack_secret
+ # in "https://www.jwz.org/hacks/galdown".
+ #
+ $url =~ s@_[sqtmnzc](\.[a-z]+)$@_b$1@si
if ($url =~ m@^https?://[^/?#&]*?flickr\.com/@si);
print STDERR "$progname: downloading: $dir/$file for $uid / $url\n"
if ($verbose > 1);
init_lwp();
$LWP::Simple::ua->agent ("$progname/$version");
+
+ $url =~ s/\#.*$//s; # Omit search terms
my $status = LWP::Simple::mirror ($url, "$dir/$file");
if (!LWP::Simple::is_success ($status)) {
print STDERR "$progname: error $status: $url\n"; # keep going
my $dir = "$ENV{HOME}/Library/Caches"; # MacOS location
if (-d $dir) {
$dir = "$dir/org.jwz.xscreensaver.feeds";
- } elsif (-d "$ENV{HOME}/tmp") {
+ } elsif (-d "$ENV{HOME}/.cache") { # Gnome "FreeDesktop XDG" location
+ $dir = "$ENV{HOME}/.cache/xscreensaver";
+ if (! -d $dir) { mkdir ($dir) || error ("mkdir $dir: $!"); }
+ $dir .= "/feeds";
+ if (! -d $dir) { mkdir ($dir) || error ("mkdir $dir: $!"); }
+ } elsif (-d "$ENV{HOME}/tmp") { # If ~/tmp/ exists, use it.
$dir = "$ENV{HOME}/tmp/.xscreensaver-feeds";
} else {
$dir = "$ENV{HOME}/.xscreensaver-feeds";
my $poll_p = ($mtime + $feed_max_age < time);
- $poll_p = 1 unless ($cache_p); # poll again now with --no-cache cmd line arg.
+ # --no-cache cmd line arg means poll again right now.
+ $poll_p = 1 unless ($cache_p);
- # Even if the cache is young, let's make sure there are at least
- # a few files in it, and re-check if not.
+ # Even if the cache is young, make sure there is at least one file,
+ # and re-check if not.
#
if (! $poll_p) {
my $count = 0;
closedir $dirh;
if ($count <= 0) {
- print STDERR "$progname: no files in cache of $url\n" if ($verbose);
+ print STDERR "$progname: no image files in cache of $url\n"
+ if ($verbose);
$poll_p = 1;
}
}
my @urls = parse_feed ($url);
print STDERR "$progname: " . ($#urls + 1) . " images\n"
if ($verbose > 1);
+ my %seen_src_urls;
foreach my $p (@urls) {
my ($furl, $id) = @$p;
+ $furl = expand_url ($furl, $url);
+
+ # No need to download the same image twice, even if it was in the feed
+ # multiple times under different GUIDs.
+ next if ($seen_src_urls{$furl});
+ $seen_src_urls{$furl} = 1;
+
my $f = download_image ($furl, $id, $dir);
next unless $f;
$files{$f} = 1; # Got it, don't delete
$count++;
}
- print STDERR "$progname: empty feed: $url\n" if ($count <= 0);
+ my $empty_p = ($count <= 0);
# Now delete any files that are no longer in the feed.
# But if there was nothing in the feed (network failure?)
}
}
- # Both feed and cache are empty. No files at all.
+ # Both feed and cache are empty. No files at all. Bail.
error ("empty feed: $url") if ($kept <= 1);
+ # Feed is empty, but we have some files from last time. Warn.
+ print STDERR "$progname: empty feed: using cache: $url\n"
+ if ($empty_p);
+
$mtime = time(); # update the timestamp
} else {
write_cache ($dir);
-# @all_files = sort(@all_files);
-
if ($#all_files < 0) {
- print STDERR "$progname: no files in $dir\n";
+ print STDERR "$progname: no image files in $dir\n";
exit 1;
}
my $max_tries = 50;
- for (my $i = 0; $i < $max_tries; $i++) {
-
- my $n = int (rand ($#all_files + 1));
- my $file = $all_files[$n];
- if (large_enough_p ($file)) {
- if (! $url) {
- $file =~ s@^\Q$dir\L/@@so || die; # remove $dir from front
+ my $total_files = @all_files;
+ my $sparse_p = ($total_files < 20);
+
+ # If the directory has a lot of files in it:
+ # Make a pass through looking for hirez files (assume some are thumbs);
+ # If we found none, then, select any other file at random.
+ # Otherwise if there are a small number of files:
+ # Just select one at random (in case there's like, just one hirez).
+
+ for (my $check_size_p = $sparse_p ? 0 : 1;
+ $check_size_p >= 0; $check_size_p--) {
+
+ for (my $i = 0; $i < $max_tries; $i++) {
+ my $n = int (rand ($total_files));
+ my $file = $all_files[$n];
+ if (!$check_size_p || large_enough_p ($file)) {
+ if (! $url) {
+ $file =~ s@^\Q$dir/@@so || die; # remove $dir from front
+ }
+ return $file;
}
- return $file;
}
}
- print STDERR "$progname: no suitable images in $dir " .
- "(after $max_tries tries)\n";
+ print STDERR "$progname: no suitable images in " . ($url || $dir) . " -- " .
+ ($total_files <= $max_tries
+ ? "all $total_files images"
+ : "$max_tries of $total_files images") .
+ " are smaller than ${min_image_width}x${min_image_height}.\n";
# If we got here, blow away the cache. Maybe it's stale.
unlink $cache_file_name if $cache_file_name;
sub image_file_size($) {
my ($file) = @_;
my $in;
- if (! open ($in, '<', $file)) {
+ if (! open ($in, '<:raw', $file)) {
print STDERR "$progname: $file: $!\n" if ($verbose);
return ();
}
- binmode ($in); # Larry can take Unicode and shove it up his ass sideways.
my $body = '';
sysread ($in, $body, 1024 * 50); # The first 50k should be enough.
close $in; # (It's not for certain huge jpegs...
}
# Reads the prefs we use from ~/.xscreensaver, falling back to the X
# resource database (via "appres") when the config file yields nothing
# and a display is available.
#
sub get_x11_prefs() {
  my $found = 0;

  if (open (my $fh, '<', $config_file)) {
    print STDERR "$progname: reading $config_file\n" if ($verbose > 1);
    my $body = do { local $/ = undef; <$fh> };   # slurp entire file
    close $fh;
    $found = get_x11_prefs_1 ($body);
  } elsif ($verbose > 1) {
    print STDERR "$progname: $config_file: $!\n";
  }

  return if ($found || !defined ($ENV{DISPLAY}));

  # We weren't able to read settings from the .xscreensaver file.
  # Fall back to any settings in the X resource database
  # (/usr/X11R6/lib/X11/app-defaults/XScreenSaver)
  #
  print STDERR "$progname: reading X resources\n" if ($verbose > 1);
  $found = get_x11_prefs_1 (`appres XScreenSaver xscreensaver -1`);
}
+
+
# Parses the body of a .xscreensaver file (or appres output) and pulls
# out the settings we care about -- currently only "imageDirectory".
# Stores the value in the $image_directory global; returns 1 if any
# setting was found, 0 otherwise.
#
sub get_x11_prefs_1($) {
  my ($text) = @_;

  $text =~ s@\\\n@@gs;              # join continuation lines
  $text =~ s@^[ \t]*#[^\n]*$@@gm;   # strip comment lines

  return 0
    unless ($text =~ m/^[.*]*imageDirectory:[ \t]*([^\s]+)\s*$/im);

  $image_directory = $1;
  return 1;
}
+
+
# Loads the imageDirectory preference from the MacOS defaults database
# for the given bundle id and stores it in $image_directory, falling
# back to "~/Pictures" when unset.
#
sub get_cocoa_prefs($) {
  my ($bundle_id) = @_;
  print STDERR "$progname: reading Cocoa prefs: \"$bundle_id\"\n"
    if ($verbose > 1);
  my $dir = get_cocoa_pref_1 ($bundle_id, "imageDirectory");
  $dir = '~/Pictures' unless defined ($dir);  # Match default in XScreenSaverView
  $image_directory = $dir;
}
+
+
# Reads a single key from the MacOS "defaults" database for the given
# bundle id.  Returns the trimmed value, or undef when the key is unset
# or the command produced no output.
#
sub get_cocoa_pref_1($$) {
  my ($bundle_id, $key) = @_;

  # Make sure there's nothing stupid/malicious in either string before
  # interpolating them into a shell command.
  foreach ($bundle_id, $key) { s/[^-a-z\d. ]/_/gsi; }

  my $cmd = "defaults -currentHost read \"$bundle_id\" \"$key\"";
  print STDERR "$progname: executing $cmd\n"
    if ($verbose > 3);

  my $result = `$cmd 2>/dev/null`;
  $result =~ s/^\s+//s;
  $result =~ s/\s+$//s;

  print STDERR "$progname: Cocoa: $bundle_id $key = \"$result\"\n"
    if ($verbose > 2);

  return undef if ($result =~ m/^$/s);   # empty output means "unset"
  return $result;
}
+
+
# Report a fatal error on stderr.
# NOTE(review): no exit or die is visible in this hunk, but callers such
# as error("empty feed: $url") appear to rely on it terminating the
# script -- confirm an "exit 1" follows in the full source.
sub error($) {
  my ($err) = @_;
  print STDERR "$progname: $err\n";
}
# Print a usage/help summary for this script to stderr, then exit 1.
# (Review note: this span is a patch hunk; the "-"/"+" lines below are
# the old and new wordings of the message.)
sub usage() {
-  print STDERR "usage: $progname [--verbose] directory\n" .
+  print STDERR "usage: $progname [--verbose] [ directory-or-feed-url ]\n\n" .
  " Prints the name of a randomly-selected image file. The directory\n" .
  " is searched recursively. Images smaller than " .
  "${min_image_width}x${min_image_height} are excluded.\n" .
  "\n" .
  " The directory may also be the URL of an RSS/Atom feed. Enclosed\n" .
-  " images will be downloaded cached locally.\n" .
+  " images will be downloaded and cached locally.\n" .
  "\n";
  exit 1;
}
# Parse the command line, determine the image directory (from argv,
# Cocoa preferences, or X11 resources), pick a random image file from
# it -- or from an RSS/Atom feed URL -- and print its name on stdout.
#
sub main() {
  my $cocoa_bundle  = undef;
  my $want_absolute = 0;

  while (my $arg = shift @ARGV) {
    if    ($arg =~ m/^--?verbose$/s)      { $verbose++; }
    elsif ($arg =~ m/^-v+$/s)             { $verbose += length($arg)-1; }
    elsif ($arg =~ m/^--?name$/s)         { }   # ignored, for compatibility
    elsif ($arg =~ m/^--?spotlight$/s)    { $use_spotlight_p = 1; }
    elsif ($arg =~ m/^--?no-spotlight$/s) { $use_spotlight_p = 0; }
    elsif ($arg =~ m/^--?cache$/s)        { $cache_p = 1; }
    elsif ($arg =~ m/^--?no-?cache$/s)    { $cache_p = 0; }
    elsif ($arg =~ m/^--?cocoa$/)         { $cocoa_bundle = shift @ARGV; }
    elsif ($arg =~ m/^--?abs(olute)?$/)   { $want_absolute = 1; }
    elsif ($arg =~ m/^-./)                { usage; }
    elsif (!defined($image_directory))    { $image_directory = $arg; }
    else                                  { usage; }
  }

  # Most hacks (X11 and Cocoa) pass a --directory value on the command line,
  # but if they don't, look it up from the resources. Currently this only
  # happens with "glitchpeg" which invokes xscreensaver-getimage-file
  # directly instead of going through the traditional path.
  #
  if (! $image_directory) {
    $cocoa_bundle = $ENV{XSCREENSAVER_CLASSPATH}   # see OSX/XScreenSaverView.m
      unless defined ($cocoa_bundle);

    if (defined ($cocoa_bundle)) {
      get_cocoa_prefs ($cocoa_bundle);
      error ("no imageDirectory in $cocoa_bundle") unless $image_directory;
    } else {
      get_x11_prefs ();
      error ("no imageDirectory in X11 resources") unless $image_directory;
    }
  }

  usage unless (defined ($image_directory));

  $image_directory =~ s@^feed:@http:@si;

  unless ($image_directory =~ m/^https?:/si) {    # feed URLs are used as-is
    $image_directory =~ s@^~/@$ENV{HOME}/@s;      # allow literal "~/"
    $image_directory =~ s@/+$@@s;                 # omit trailing /
    if (! -d $image_directory) {
      print STDERR "$progname: $image_directory not a directory or URL\n";
      usage;
    }
  }

  my $file = find_random_file ($image_directory);

  # With --absolute return fully qualified paths instead of relative to --dir.
  if ($want_absolute &&
      $file !~ m@^/@ &&
      $image_directory =~ m@^/@s) {
    $file = "$image_directory/$file";
    $file =~ s@//+@/@gs;
  }

  print STDOUT "$file\n";
}